code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin A : int = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class A ( UpperCamelCase__ , unittest.TestCase ): '''simple docstring''' A__ = XLNetTokenizer A__ = XLNetTokenizerFast A__ = True A__ = True def lowerCamelCase__ (self : Union[str, Any] ) -> Dict: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing lowercase__ = XLNetTokenizer(__a , keep_accents=__a ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ (self : Dict ) -> int: """simple docstring""" lowercase__ = """<s>""" lowercase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a ) def lowerCamelCase__ (self : Optional[Any] ) -> Dict: """simple docstring""" lowercase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """<eod>""" ) self.assertEqual(len(__a ) , 1006 ) def lowerCamelCase__ (self : Tuple ) -> Optional[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def lowerCamelCase__ (self : Tuple ) -> int: """simple docstring""" lowercase__ = XLNetTokenizer(__a , keep_accents=__a ) lowercase__ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [285, 46, 10, 170, 382] ) lowercase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __a , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowercase__ = tokenizer.convert_tokens_to_ids(__a ) self.assertListEqual(__a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) lowercase__ = tokenizer.convert_ids_to_tokens(__a ) self.assertListEqual( __a , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def lowerCamelCase__ (self : Any ) -> Any: """simple docstring""" lowercase__ = XLNetTokenizer(__a , do_lower_case=__a ) lowercase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __a , [ SPIECE_UNDERLINE + """""", """i""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] , ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] ) def lowerCamelCase__ (self : int ) -> Dict: """simple docstring""" lowercase__ = XLNetTokenizer(__a , do_lower_case=__a ) lowercase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __a , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] , ) @slow def lowerCamelCase__ (self : str ) -> Optional[Any]: """simple docstring""" lowercase__ = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" ) lowercase__ = tokenizer.encode("""sequence builders""" , add_special_tokens=__a ) lowercase__ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__a ) lowercase__ = tokenizer.build_inputs_with_special_tokens(__a ) lowercase__ = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def lowerCamelCase__ (self : Optional[int] ) -> List[str]: """simple docstring""" lowercase__ = {"""input_ids""": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__a , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
305
'''simple docstring''' import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class __A ( unittest.TestCase ): def __init__(self : str , __a : Optional[Any] , __a : Optional[Any]=13 , __a : int=30 , __a : Union[str, Any]=2 , __a : Dict=3 , __a : List[Any]=True , __a : Optional[Any]=True , __a : List[Any]=32 , __a : Any=5 , __a : str=4 , __a : Optional[int]=37 , __a : Optional[int]="gelu" , __a : List[str]=0.1 , __a : Tuple=0.1 , __a : List[str]=10 , __a : Optional[int]=0.02 , ): UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = image_size UpperCAmelCase_ = patch_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = is_training UpperCAmelCase_ = use_labels UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ = (image_size // patch_size) ** 2 UpperCAmelCase_ = num_patches + 1 def _lowercase (self : Any ): UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , ) return config, pixel_values def _lowercase (self : Dict , __a : Any , __a : List[Any] ): UpperCAmelCase_ = FlaxViTModel(config=__a ) UpperCAmelCase_ = model(__a ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ = (self.image_size, self.image_size) UpperCAmelCase_ = (self.patch_size, self.patch_size) UpperCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def _lowercase (self : Tuple , __a : str , __a : Any ): UpperCAmelCase_ = self.type_sequence_label_size UpperCAmelCase_ = FlaxViTForImageClassification(config=__a ) UpperCAmelCase_ = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase_ = 1 UpperCAmelCase_ = FlaxViTForImageClassification(__a ) UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ = model(__a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) = config_and_inputs UpperCAmelCase_ = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class __A ( UpperCamelCase__ , unittest.TestCase ): a__ : Tuple = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def _lowercase (self : Any ): UpperCAmelCase_ = FlaxViTModelTester(self ) UpperCAmelCase_ = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def _lowercase (self : Tuple ): self.config_tester.run_common_tests() def _lowercase (self : str ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def _lowercase (self : str ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) def _lowercase (self : Tuple ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(__a ) UpperCAmelCase_ = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ = [*signature.parameters.keys()] UpperCAmelCase_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , __a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase_ = self._prepare_for_class(__a , __a ) UpperCAmelCase_ = model_class(__a ) @jax.jit def model_jitted(__a : Tuple , **__a : List[Any] ): return model(pixel_values=__a , **__a ) with self.subTest("JIT Enabled" ): UpperCAmelCase_ = model_jitted(**__a ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): UpperCAmelCase_ = model_jitted(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) ) for jitted_output, output in zip(__a , __a ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _lowercase (self : Tuple ): for model_class_name in self.all_model_classes: UpperCAmelCase_ = model_class_name.from_pretrained("google/vit-base-patch16-224" ) UpperCAmelCase_ = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(__a )
1
0
"""simple docstring""" from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( "The RoBERTa Model transformer with early exiting (DeeRoBERTa). " , _UpperCamelCase , ) class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :int = RobertaConfig SCREAMING_SNAKE_CASE__ :Any = "roberta" def __init__( self : int , __a : List[Any] ) -> List[str]: super().__init__(__a ) _UpperCamelCase : Optional[Any] = RobertaEmbeddings(__a ) self.init_weights() @add_start_docstrings( "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. " , _UpperCamelCase , ) class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :str = RobertaConfig SCREAMING_SNAKE_CASE__ :Union[str, Any] = "roberta" def __init__( self : Tuple , __a : Optional[int] ) -> Optional[int]: super().__init__(__a ) _UpperCamelCase : Tuple = config.num_labels _UpperCamelCase : Dict = config.num_hidden_layers _UpperCamelCase : Optional[Any] = DeeRobertaModel(__a ) _UpperCamelCase : int = nn.Dropout(config.hidden_dropout_prob ) _UpperCamelCase : Tuple = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(__a ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : int=None , __a : List[str]=None , __a : Optional[Any]=None , __a : int=None , __a : Dict=None , __a : Optional[Any]=None , __a : int=None , __a : Dict=-1 , __a : Union[str, Any]=False , ) -> str: _UpperCamelCase : Tuple = self.num_layers try: _UpperCamelCase : Union[str, Any] = self.roberta( __a , attention_mask=__a , token_type_ids=__a , position_ids=__a , head_mask=__a , inputs_embeds=__a , ) _UpperCamelCase : int = outputs[1] _UpperCamelCase : Any = self.dropout(__a ) _UpperCamelCase : Optional[int] = self.classifier(__a ) _UpperCamelCase : str = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _UpperCamelCase : str = e.message _UpperCamelCase : Tuple = e.exit_layer _UpperCamelCase : Optional[int] = outputs[0] if not self.training: _UpperCamelCase : List[Any] = entropy(__a ) _UpperCamelCase : List[Any] = [] _UpperCamelCase : Union[str, Any] = [] if labels is not None: if self.num_labels == 1: # We are doing regression _UpperCamelCase : List[Any] = MSELoss() _UpperCamelCase : List[Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _UpperCamelCase : Tuple = CrossEntropyLoss() _UpperCamelCase : List[str] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _UpperCamelCase : Any = [] for highway_exit in outputs[-1]: _UpperCamelCase : Union[str, Any] = highway_exit[0] if not self.training: highway_logits_all.append(__a ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _UpperCamelCase : Optional[Any] = MSELoss() _UpperCamelCase : Tuple = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _UpperCamelCase : Optional[Any] = CrossEntropyLoss() _UpperCamelCase : Optional[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(__a ) if train_highway: _UpperCamelCase : Optional[int] = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _UpperCamelCase : Union[str, Any] = (loss,) + outputs if not self.training: _UpperCamelCase : int = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _UpperCamelCase : int = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
310
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json", "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json", # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl } class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :List[Any] = "xlm-roberta-xl" def __init__( self : Any , __a : Tuple=25_0880 , __a : Optional[Any]=2560 , __a : List[str]=36 , __a : Any=32 , __a : Dict=1_0240 , __a : Optional[Any]="gelu" , __a : int=0.1 , __a : Tuple=0.1 , __a : str=514 , __a : Any=1 , __a : List[Any]=0.02 , __a : List[str]=1e-0_5 , __a : Optional[Any]=1 , __a : List[Any]=0 , __a : Tuple=2 , __a : int="absolute" , __a : Dict=True , __a : Dict=None , **__a : Tuple , ) -> str: super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a ) _UpperCamelCase : Any = vocab_size _UpperCamelCase : Optional[int] = hidden_size _UpperCamelCase : str = num_hidden_layers _UpperCamelCase : Optional[int] = num_attention_heads _UpperCamelCase : List[str] = hidden_act _UpperCamelCase : Union[str, Any] = intermediate_size _UpperCamelCase : str = hidden_dropout_prob _UpperCamelCase : str = attention_probs_dropout_prob _UpperCamelCase : Dict = max_position_embeddings _UpperCamelCase : Optional[Any] = type_vocab_size _UpperCamelCase : str = initializer_range _UpperCamelCase : Any = layer_norm_eps _UpperCamelCase : Any = position_embedding_type _UpperCamelCase : Union[str, Any] = use_cache _UpperCamelCase : Optional[Any] = classifier_dropout class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' @property def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _UpperCamelCase : Any = {0: "batch", 1: "choice", 2: "sequence"} else: _UpperCamelCase : Dict = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
310
1
'''simple docstring''' import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() __UpperCAmelCase =2 class a__ : def __init__( self : int , *, # begin keyword-only arguments a : Dict="<s>" , a : Dict="<pad>" , a : int="</s>" , a : List[Any]="<unk>" , a : int=None , ): """simple docstring""" __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = bos, unk, pad, eos __lowerCamelCase = [] __lowerCamelCase = [] __lowerCamelCase = {} __lowerCamelCase = self.add_symbol(a ) __lowerCamelCase = self.add_symbol(a ) __lowerCamelCase = self.add_symbol(a ) __lowerCamelCase = self.add_symbol(a ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(a ) __lowerCamelCase = len(self.symbols ) def __eq__( self : Dict , a : Tuple ): """simple docstring""" return self.indices == other.indices def __getitem__( self : List[Any] , a : Tuple ): """simple docstring""" if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self : List[str] ): """simple docstring""" return len(self.symbols ) def __contains__( self : Optional[int] , a : List[str] ): """simple docstring""" return sym in self.indices @classmethod def SCREAMING_SNAKE_CASE__ ( cls : str , a : Dict ): """simple docstring""" __lowerCamelCase = cls() d.add_from_file(a ) return d def SCREAMING_SNAKE_CASE__ ( self : Any , a : Tuple , a : Any=1 , a : List[str]=False ): """simple docstring""" if word in self.indices and not overwrite: __lowerCamelCase = self.indices[word] __lowerCamelCase = self.count[idx] + n return idx else: __lowerCamelCase = len(self.symbols ) __lowerCamelCase = idx self.symbols.append(a ) self.count.append(a ) return idx def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : Any ): """simple docstring""" return 0 def SCREAMING_SNAKE_CASE__ ( self : Any , a : str ): """simple docstring""" if isinstance(a , a ): try: with open(a , '''r''' , encoding='''utf-8''' ) as fd: self.add_from_file(a ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(a ) ) return __lowerCamelCase = f.readlines() __lowerCamelCase = self._load_meta(a ) for line in lines[indices_start_line:]: try: __lowerCamelCase , __lowerCamelCase = line.rstrip().rsplit(''' ''' , 1 ) if field == "#fairseq:overwrite": __lowerCamelCase = True __lowerCamelCase , __lowerCamelCase = line.rsplit(''' ''' , 1 ) else: __lowerCamelCase = False __lowerCamelCase = int(a ) __lowerCamelCase = line if word in self and not overwrite: raise RuntimeError( '''Duplicate word found when loading Dictionary: \'{}\'. ''' '''Duplicate words can overwrite earlier ones by adding the ''' '''#fairseq:overwrite flag at the end of the corresponding row ''' '''in the dictionary file. If using the Camembert model, please ''' '''download an updated copy of the model file.'''.format(a ) ) self.add_symbol(a , n=a , overwrite=a ) except ValueError: raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''' ) def __lowerCAmelCase ( UpperCamelCase__ ) -> Tuple: # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} __lowerCamelCase = dict((re.sub(r'''@@$''' , '''''' , UpperCamelCase__ ), v) if k.endswith('''@@''' ) else (re.sub(r'''$''' , '''</w>''' , UpperCamelCase__ ), v) for k, v in d.items() ) __lowerCamelCase = '''<s> <pad> </s> <unk>'''.split() # restore the special tokens for k in keep_keys: del da[f"""{k}</w>"""] __lowerCamelCase = d[k] # restore return da def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]: # prep if not os.path.exists(UpperCamelCase__ ): raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""" ) os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) print(f"""Writing results to {pytorch_dump_folder_path}""" ) # handle various types of models __lowerCamelCase = os.path.join(UpperCamelCase__ , '''checkpoint.pt''' ) if not os.path.isfile(UpperCamelCase__ ): raise ValueError(f"""path to the file {checkpoint_file} does not exist!""" ) __lowerCamelCase = torch.load(UpperCamelCase__ , map_location='''cpu''' ) __lowerCamelCase = chkpt['''cfg''']['''model'''] # dicts __lowerCamelCase = os.path.join(UpperCamelCase__ , '''dict.txt''' ) if not os.path.isfile(UpperCamelCase__ ): raise ValueError(f"""path to the file {dict_file} does not exist!""" ) __lowerCamelCase = Dictionary.load(UpperCamelCase__ ) __lowerCamelCase = rewrite_dict_keys(src_dict.indices ) __lowerCamelCase = len(UpperCamelCase__ ) __lowerCamelCase = os.path.join(UpperCamelCase__ , VOCAB_FILES_NAMES['''vocab_file'''] ) print(f"""Generating {src_vocab_file} of {src_vocab_size} records""" ) with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(UpperCamelCase__ , ensure_ascii=UpperCamelCase__ , indent=UpperCamelCase__ ) ) # merges_file (bpecodes) __lowerCamelCase = os.path.join(UpperCamelCase__ , '''bpecodes''' ) if not os.path.isfile(UpperCamelCase__ ): raise ValueError(f"""path to the file {bpecodes_file} does not exist!""" ) __lowerCamelCase = os.path.join(UpperCamelCase__ , VOCAB_FILES_NAMES['''merges_file'''] ) shutil.copyfile(UpperCamelCase__ , UpperCamelCase__ ) # model config __lowerCamelCase = os.path.join(UpperCamelCase__ , '''config.json''' ) __lowerCamelCase = { '''activation_dropout''': args['''activation_dropout'''], '''architectures''': ['''BioGptForCausalLM'''], '''attention_probs_dropout_prob''': args['''attention_dropout'''], '''bos_token_id''': 0, '''eos_token_id''': 2, '''hidden_act''': args['''activation_fn'''], '''hidden_dropout_prob''': args['''dropout'''], '''hidden_size''': args['''decoder_embed_dim'''], '''initializer_range''': 0.0_2, '''intermediate_size''': args['''decoder_ffn_embed_dim'''], '''layer_norm_eps''': 1E-12, '''layerdrop''': args['''decoder_layerdrop'''], '''max_position_embeddings''': args['''max_target_positions'''], '''model_type''': '''biogpt''', '''num_attention_heads''': args['''decoder_attention_heads'''], '''num_hidden_layers''': args['''decoder_layers'''], '''pad_token_id''': 1, '''scale_embedding''': not args['''no_scale_embedding'''], '''tie_word_embeddings''': args['''share_decoder_input_output_embed'''], '''vocab_size''': src_vocab_size, } # good hparam defaults to start with print(f"""Generating {biogpt_model_config_file}""" ) with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(UpperCamelCase__ , ensure_ascii=UpperCamelCase__ , indent=UpperCamelCase__ ) ) # tokenizer config __lowerCamelCase = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) __lowerCamelCase = { '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''model_max_length''': 10_24, '''pad_token''': '''<pad>''', '''special_tokens_map_file''': None, '''tokenizer_class''': '''BioGptTokenizer''', '''unk_token''': '''<unk>''', } print(f"""Generating {biogpt_tokenizer_config_file}""" ) with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(UpperCamelCase__ , ensure_ascii=UpperCamelCase__ , indent=UpperCamelCase__ ) ) # model __lowerCamelCase = chkpt['''model'''] # remove unneeded keys __lowerCamelCase = [ '''decoder.version''', ] for k in ignore_keys: model_state_dict.pop(UpperCamelCase__ , UpperCamelCase__ ) __lowerCamelCase = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith('''output_projection.weight''' ): __lowerCamelCase = model_state_dict.pop(UpperCamelCase__ ) else: __lowerCamelCase = model_state_dict.pop(UpperCamelCase__ ) __lowerCamelCase = BioGptConfig.from_pretrained(UpperCamelCase__ ) __lowerCamelCase = BioGptForCausalLM(UpperCamelCase__ ) # check that it loads ok model_new.load_state_dict(UpperCamelCase__ ) # save __lowerCamelCase = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) print(f"""Generating {pytorch_weights_dump_path}""" ) torch.save(UpperCamelCase__ , UpperCamelCase__ ) print('''Conversion is done!''' ) if __name__ == "__main__": __UpperCAmelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( "--biogpt_checkpoint_path", default=None, type=str, required=True, help=( "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts," " bpecodes, etc." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __UpperCAmelCase =parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
67
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class _UpperCAmelCase : def __init__( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : int=13 , lowercase_ : Optional[int]=7 , lowercase_ : Any=True , lowercase_ : Dict=True , lowercase_ : Dict=True , lowercase_ : Optional[Any]=99 , lowercase_ : Union[str, Any]=32 , lowercase_ : str=5 , lowercase_ : Union[str, Any]=4 , lowercase_ : Any=37 , lowercase_ : Tuple="gelu" , lowercase_ : Dict=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Optional[int]=512 , lowercase_ : Optional[Any]=16 , lowercase_ : Optional[Any]=2 , lowercase_ : Optional[Any]=0.02 , lowercase_ : List[Any]=3 , lowercase_ : Union[str, Any]=4 , lowercase_ : List[Any]=None , ): snake_case_ : Any = parent snake_case_ : List[str] = batch_size snake_case_ : List[Any] = seq_length snake_case_ : Optional[int] = is_training snake_case_ : Union[str, Any] = use_token_type_ids snake_case_ : Optional[Any] = use_labels snake_case_ : Union[str, Any] = vocab_size snake_case_ : Any = hidden_size snake_case_ : List[Any] = num_hidden_layers snake_case_ : Any = num_attention_heads snake_case_ : Dict = intermediate_size snake_case_ : Union[str, Any] = hidden_act snake_case_ : Optional[int] = hidden_dropout_prob snake_case_ : Optional[Any] = attention_probs_dropout_prob snake_case_ : Tuple = max_position_embeddings snake_case_ : int = type_vocab_size snake_case_ : Tuple = type_sequence_label_size snake_case_ : str = initializer_range snake_case_ : Tuple = num_labels snake_case_ : str = num_choices snake_case_ : Any = scope snake_case_ : Dict = self.vocab_size - 1 def _snake_case ( self : int ): snake_case_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ : Optional[Any] = None if self.use_token_type_ids: snake_case_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ : str = None snake_case_ : Dict = None snake_case_ : str = None if self.use_labels: snake_case_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ : Tuple = ids_tensor([self.batch_size] , self.num_choices ) snake_case_ : int = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) snake_case_ : Any = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _snake_case ( self : Tuple , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Dict , *lowercase_ : Dict ): snake_case_ : List[Any] = OpenAIGPTModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ : Any = model(lowercase_ , token_type_ids=lowercase_ , head_mask=lowercase_ ) snake_case_ : Optional[Any] = model(lowercase_ , token_type_ids=lowercase_ ) snake_case_ : Optional[Any] = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self : Tuple , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : List[Any] , *lowercase_ : Optional[Any] ): snake_case_ : Union[str, Any] = OpenAIGPTLMHeadModel(lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ : Union[str, Any] = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self : List[str] , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Dict , *lowercase_ : Union[str, Any] ): snake_case_ : Tuple = OpenAIGPTDoubleHeadsModel(lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ : Dict = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self : Any , lowercase_ : str , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , *lowercase_ : Any ): snake_case_ : int = self.num_labels snake_case_ : Any = OpenAIGPTForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ : Optional[Any] = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self : int ): snake_case_ : Dict = self.prepare_config_and_inputs() ( ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ) : str = config_and_inputs snake_case_ : str = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_torch class _UpperCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase): _lowerCAmelCase : Dict = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) _lowerCAmelCase : int = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly _lowerCAmelCase : Union[str, Any] = ( { """feature-extraction""": OpenAIGPTModel, """text-classification""": OpenAIGPTForSequenceClassification, """text-generation""": OpenAIGPTLMHeadModel, """zero-shot""": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _snake_case ( self : Tuple , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def _snake_case ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : List[str]=False ): snake_case_ : Dict = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": snake_case_ : List[str] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowercase_ , ) snake_case_ : int = inputs_dict['''labels'''] snake_case_ : Optional[Any] = inputs_dict['''labels'''] snake_case_ : int = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowercase_ , ) snake_case_ : Tuple = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase_ ) return inputs_dict def _snake_case ( self : Any ): snake_case_ : List[str] = OpenAIGPTModelTester(self ) snake_case_ : Dict = ConfigTester(self , config_class=lowercase_ , n_embd=37 ) def _snake_case ( self : List[str] ): self.config_tester.run_common_tests() def _snake_case ( self : Optional[Any] ): snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*lowercase_ ) def _snake_case ( self : List[str] ): snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*lowercase_ ) def _snake_case ( self : int ): snake_case_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*lowercase_ ) def _snake_case ( self : List[str] ): snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase_ ) @slow def _snake_case ( self : Dict ): for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : Optional[Any] = OpenAIGPTModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) @require_torch class _UpperCAmelCase ( unittest.TestCase): @slow def _snake_case ( self : Optional[int] ): snake_case_ : Optional[Any] = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' ) model.to(lowercase_ ) snake_case_ : List[str] = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=lowercase_ ) # the president is snake_case_ : List[Any] = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the snake_case_ : Optional[Any] = model.generate(lowercase_ , do_sample=lowercase_ ) self.assertListEqual(output_ids[0].tolist() , lowercase_ )
264
0
import argparse import datetime def lowerCamelCase_ ( _a ): """simple docstring""" lowerCAmelCase__ : Optional[Any] = { '''0''': '''Sunday''', '''1''': '''Monday''', '''2''': '''Tuesday''', '''3''': '''Wednesday''', '''4''': '''Thursday''', '''5''': '''Friday''', '''6''': '''Saturday''', } lowerCAmelCase__ : Optional[int] = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(_a ) < 11: raise ValueError('''Must be 10 characters long''' ) # Get month lowerCAmelCase__ : int = int(date_input[0] + date_input[1] ) # Validate if not 0 < m < 13: raise ValueError('''Month must be between 1 - 12''' ) lowerCAmelCase__ : str = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError('''Date separator must be \'-\' or \'/\'''' ) # Get day lowerCAmelCase__ : int = int(date_input[3] + date_input[4] ) # Validate if not 0 < d < 32: raise ValueError('''Date must be between 1 - 31''' ) # Get second separator lowerCAmelCase__ : str = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError('''Date separator must be \'-\' or \'/\'''' ) # Get year lowerCAmelCase__ : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] ) # Arbitrary year range if not 45 < y < 8_500: raise ValueError( '''Year out of range. There has to be some sort of limit...right?''' ) # Get datetime obj for validation lowerCAmelCase__ : Dict = datetime.date(int(_a ) , int(_a ) , int(_a ) ) # Start math if m <= 2: lowerCAmelCase__ : List[Any] = y - 1 lowerCAmelCase__ : List[str] = m + 12 # maths var lowerCAmelCase__ : int = int(str(_a )[:2] ) lowerCAmelCase__ : int = int(str(_a )[2:] ) lowerCAmelCase__ : int = int(2.6 * m - 5.39 ) lowerCAmelCase__ : int = int(c / 4 ) lowerCAmelCase__ : int = int(k / 4 ) lowerCAmelCase__ : int = int(d + k ) lowerCAmelCase__ : int = int(t + u + v + x ) lowerCAmelCase__ : int = int(z - (2 * c) ) lowerCAmelCase__ : int = round(w % 7 ) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError('''The date was evaluated incorrectly. Contact developer.''' ) # Response lowerCAmelCase__ : str = f'Your date {date_input}, is a {days[str(_a )]}!' return response if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase = argparse.ArgumentParser( description=( '''Find out what day of the week nearly any date is or was. Enter ''' '''date as a string in the mm-dd-yyyy or mm/dd/yyyy format''' ) ) parser.add_argument( '''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)''' ) lowerCamelCase = parser.parse_args() zeller(args.date_input)
370
import random from .binary_exp_mod import bin_exp_mod def lowerCamelCase_ ( _a , _a=1_000 ): """simple docstring""" if n < 2: return False if n % 2 == 0: return n == 2 # this means n is odd lowerCAmelCase__ : int = n - 1 lowerCAmelCase__ : Any = 0 while d % 2 == 0: d /= 2 exp += 1 # n - 1=d*(2**exp) lowerCAmelCase__ : Optional[Any] = 0 while count < prec: lowerCAmelCase__ : Optional[Any] = random.randint(2 , n - 1 ) lowerCAmelCase__ : List[Any] = bin_exp_mod(_a , _a , _a ) if b != 1: lowerCAmelCase__ : Dict = True for _ in range(_a ): if b == n - 1: lowerCAmelCase__ : Union[str, Any] = False break lowerCAmelCase__ : Tuple = b * b b %= n if flag: return False count += 1 return True if __name__ == "__main__": lowerCamelCase = abs(int(input('''Enter bound : ''').strip())) print('''Here\'s the list of primes:''') print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
211
0
"""simple docstring""" from __future__ import annotations def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = [True] * limit __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): __SCREAMING_SNAKE_CASE = i * 2 while index < limit: __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = index + i __SCREAMING_SNAKE_CASE = [2] for i in range(3 , UpperCamelCase_ , 2 ): if is_prime[i]: primes.append(UpperCamelCase_ ) return primes def _lowerCAmelCase ( UpperCamelCase_ = 100_0000 ): __SCREAMING_SNAKE_CASE = prime_sieve(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for i in range(len(UpperCamelCase_ ) ): for j in range(i + length , len(UpperCamelCase_ ) ): __SCREAMING_SNAKE_CASE = sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: __SCREAMING_SNAKE_CASE = j - i __SCREAMING_SNAKE_CASE = sol return largest if __name__ == "__main__": print(F"""{solution() = }""")
100
from typing import TYPE_CHECKING from ...utils import _LazyModule __UpperCamelCase : Any = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]} if TYPE_CHECKING: from .tokenization_byta import ByTaTokenizer else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
307
0
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler') class lowerCAmelCase__: '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = True , __lowerCamelCase = False ) -> Dict: _SCREAMING_SNAKE_CASE : Any = scheduler _SCREAMING_SNAKE_CASE : List[str] = optimizers if isinstance(__lowerCamelCase , (list, tuple) ) else [optimizers] _SCREAMING_SNAKE_CASE : List[Any] = split_batches _SCREAMING_SNAKE_CASE : List[str] = step_with_optimizer _SCREAMING_SNAKE_CASE : Union[str, Any] = GradientState() def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> List[str]: if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*__lowerCamelCase , **__lowerCamelCase ) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*__lowerCamelCase , **__lowerCamelCase ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step _SCREAMING_SNAKE_CASE : Dict = AcceleratorState().num_processes for _ in range(__lowerCamelCase ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , "total_steps" ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*__lowerCamelCase , **__lowerCamelCase ) else: self.scheduler.step(*__lowerCamelCase , **__lowerCamelCase ) def UpperCamelCase_ ( self ) -> str: return self.scheduler.get_last_lr() def UpperCamelCase_ ( self ) -> Optional[int]: return self.scheduler.state_dict() def UpperCamelCase_ ( self , __lowerCamelCase ) -> str: self.scheduler.load_state_dict(__lowerCamelCase ) def UpperCamelCase_ ( self ) -> str: return self.scheduler.get_lr() def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> Optional[Any]: return self.scheduler.print_lr(*__lowerCamelCase , **__lowerCamelCase )
358
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class lowerCAmelCase__: '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_2 , __lowerCamelCase=1_6 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=3 , __lowerCamelCase=4 , __lowerCamelCase=None , ) -> Any: _SCREAMING_SNAKE_CASE : str = parent _SCREAMING_SNAKE_CASE : List[Any] = 1_3 _SCREAMING_SNAKE_CASE : List[str] = 7 _SCREAMING_SNAKE_CASE : Dict = True _SCREAMING_SNAKE_CASE : List[str] = True _SCREAMING_SNAKE_CASE : int = True _SCREAMING_SNAKE_CASE : Union[str, Any] = True _SCREAMING_SNAKE_CASE : int = 9_9 _SCREAMING_SNAKE_CASE : str = 3_8_4 _SCREAMING_SNAKE_CASE : List[Any] = 2 _SCREAMING_SNAKE_CASE : Dict = 4 _SCREAMING_SNAKE_CASE : Dict = 3_7 _SCREAMING_SNAKE_CASE : Union[str, Any] = "gelu" _SCREAMING_SNAKE_CASE : str = 0.1 _SCREAMING_SNAKE_CASE : str = 0.1 _SCREAMING_SNAKE_CASE : List[Any] = 5_1_2 _SCREAMING_SNAKE_CASE : Tuple = 1_6 _SCREAMING_SNAKE_CASE : Dict = 2 _SCREAMING_SNAKE_CASE : Any = 0.02 _SCREAMING_SNAKE_CASE : Any = 3 _SCREAMING_SNAKE_CASE : List[str] = 4 _SCREAMING_SNAKE_CASE : List[Any] = 1_2_8 _SCREAMING_SNAKE_CASE : Optional[int] = 2 _SCREAMING_SNAKE_CASE : int = 9 _SCREAMING_SNAKE_CASE : List[str] = 1 _SCREAMING_SNAKE_CASE : List[Any] = None def UpperCamelCase_ ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE : List[str] = None if self.use_input_mask: _SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] ) _SCREAMING_SNAKE_CASE : Dict = None if self.use_token_type_ids: _SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : Union[str, Any] = None _SCREAMING_SNAKE_CASE : Optional[int] = None if self.use_labels: _SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_choices ) _SCREAMING_SNAKE_CASE : Union[str, Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCamelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str: _SCREAMING_SNAKE_CASE : Any = TFConvBertModel(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _SCREAMING_SNAKE_CASE : str = [input_ids, input_mask] _SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]: _SCREAMING_SNAKE_CASE : Dict = TFConvBertForMaskedLM(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE : int = self.num_labels _SCREAMING_SNAKE_CASE : str = TFConvBertForSequenceClassification(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str: _SCREAMING_SNAKE_CASE : Optional[int] = self.num_choices _SCREAMING_SNAKE_CASE : List[Any] = TFConvBertForMultipleChoice(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE : Union[str, Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE : List[Any] = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } _SCREAMING_SNAKE_CASE : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]: _SCREAMING_SNAKE_CASE : Dict = self.num_labels _SCREAMING_SNAKE_CASE : Tuple = TFConvBertForTokenClassification(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int: _SCREAMING_SNAKE_CASE : Optional[int] = TFConvBertForQuestionAnswering(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase_ ( self ) -> Tuple: _SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs() ( ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ) : List[Any] = config_and_inputs _SCREAMING_SNAKE_CASE : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ): '''simple docstring''' __snake_case = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) __snake_case = ( { 'feature-extraction': TFConvBertModel, 'fill-mask': TFConvBertForMaskedLM, 'question-answering': TFConvBertForQuestionAnswering, 'text-classification': TFConvBertForSequenceClassification, 'token-classification': TFConvBertForTokenClassification, 'zero-shot': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) __snake_case = False __snake_case = False __snake_case = False def UpperCamelCase_ ( self ) -> str: _SCREAMING_SNAKE_CASE : int = TFConvBertModelTester(self ) _SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=3_7 ) def UpperCamelCase_ ( self ) -> List[Any]: self.config_tester.run_common_tests() def UpperCamelCase_ ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def UpperCamelCase_ ( self ) -> Dict: _SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase ) def UpperCamelCase_ ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase ) def UpperCamelCase_ ( self ) -> Dict: _SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase ) def UpperCamelCase_ ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase ) def UpperCamelCase_ ( self ) -> int: _SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase ) @slow def UpperCamelCase_ ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE : Union[str, Any] = True _SCREAMING_SNAKE_CASE : Any = True if hasattr(__lowerCamelCase , "use_cache" ): _SCREAMING_SNAKE_CASE : List[str] = True _SCREAMING_SNAKE_CASE : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) _SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase ) for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = len(model(__lowerCamelCase ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowerCamelCase , saved_model=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = os.path.join(__lowerCamelCase , "saved_model" , "1" ) _SCREAMING_SNAKE_CASE : Optional[Any] = tf.keras.models.load_model(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase ) if self.is_encoder_decoder: _SCREAMING_SNAKE_CASE : List[Any] = outputs["encoder_hidden_states"] _SCREAMING_SNAKE_CASE : Union[str, Any] = outputs["encoder_attentions"] else: _SCREAMING_SNAKE_CASE : List[str] = outputs["hidden_states"] _SCREAMING_SNAKE_CASE : Dict = outputs["attentions"] self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def UpperCamelCase_ ( self ) -> str: _SCREAMING_SNAKE_CASE : Any = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) self.assertIsNotNone(__lowerCamelCase ) def UpperCamelCase_ ( self ) -> Dict: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE : Dict = True _SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length ) _SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) _SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(self.model_tester , "key_length" , __lowerCamelCase ) def check_decoder_attentions_output(__lowerCamelCase ): _SCREAMING_SNAKE_CASE : Union[str, Any] = len(__lowerCamelCase ) self.assertEqual(out_len % 2 , 0 ) _SCREAMING_SNAKE_CASE : Optional[int] = outputs.decoder_attentions self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(__lowerCamelCase ): _SCREAMING_SNAKE_CASE : Optional[Any] = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : Any = True _SCREAMING_SNAKE_CASE : Any = False _SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) _SCREAMING_SNAKE_CASE : Any = len(__lowerCamelCase ) self.assertEqual(config.output_hidden_states , __lowerCamelCase ) check_encoder_attentions_output(__lowerCamelCase ) if self.is_encoder_decoder: _SCREAMING_SNAKE_CASE : Tuple = model_class(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) self.assertEqual(config.output_hidden_states , __lowerCamelCase ) check_decoder_attentions_output(__lowerCamelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] _SCREAMING_SNAKE_CASE : Dict = True _SCREAMING_SNAKE_CASE : List[Any] = model_class(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) self.assertEqual(config.output_hidden_states , __lowerCamelCase ) check_encoder_attentions_output(__lowerCamelCase ) # Check attention is always last and order is fine _SCREAMING_SNAKE_CASE : Union[str, Any] = True _SCREAMING_SNAKE_CASE : Any = True _SCREAMING_SNAKE_CASE : Optional[int] = model_class(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[str] = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowerCamelCase ) ) self.assertEqual(model.config.output_hidden_states , __lowerCamelCase ) check_encoder_attentions_output(__lowerCamelCase ) @require_tf class lowerCAmelCase__( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase_ ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE : int = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) _SCREAMING_SNAKE_CASE : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] ) _SCREAMING_SNAKE_CASE : str = model(__lowerCamelCase )[0] _SCREAMING_SNAKE_CASE : int = [1, 6, 7_6_8] self.assertEqual(output.shape , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = tf.constant( [ [ [-0.0347_5493, -0.468_6034, -0.3063_8832], [0.2263_7248, -0.2698_8646, -0.742_3424], [0.1032_4868, -0.4501_3508, -0.5828_0784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 )
325
0
"""simple docstring""" import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm lowercase_ = re.compile("[^A-Za-z_0-9]") # parameters used in DuplicationIndex lowercase_ = 1_0 lowercase_ = 2_5_6 def lowercase ( lowerCAmelCase__ : List[str] ) -> Optional[MinHash]: if len(lowerCAmelCase__ ) < MIN_NUM_TOKENS: return None __a = MinHash(num_perm=lowerCAmelCase__ ) for token in set(lowerCAmelCase__ ): min_hash.update(token.encode() ) return min_hash def lowercase ( lowerCAmelCase__ : str ) -> Set[str]: return {t for t in NON_ALPHA.split(lowerCAmelCase__ ) if len(t.strip() ) > 0} class __lowerCAmelCase : '''simple docstring''' def __init__( self , *, _a = 0.85 , ): __a = duplication_jaccard_threshold __a = NUM_PERM __a = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) __a = defaultdict(_a ) def __UpperCAmelCase ( self , _a , _a ): __a = self._index.query(_a ) if code_key in self._index.keys: print(f'''Duplicate key {code_key}''' ) return self._index.insert(_a , _a ) if len(_a ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(_a ) break else: self._duplicate_clusters[close_duplicates[0]].add(_a ) def __UpperCAmelCase ( self ): __a = [] for base, duplicates in self._duplicate_clusters.items(): __a = [base] + list(_a ) # reformat the cluster to be a list of dict __a = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster] duplicate_clusters.append(_a ) return duplicate_clusters def __UpperCAmelCase ( self , _a ): __a = self.get_duplicate_clusters() with open(_a , '''w''' ) as f: json.dump(_a , _a ) def lowercase ( lowerCAmelCase__ : List[str] ) -> int: __a , __a = element __a = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def lowercase ( lowerCAmelCase__ : Type[Dataset] ) -> str: with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(lowerCAmelCase__ , max_queue_size=10000 ) , chunksize=100 , ): if data is not None: yield data def lowercase ( lowerCAmelCase__ : Type[Dataset] , lowerCAmelCase__ : float ) -> Dict: __a = DuplicationIndex(duplication_jaccard_threshold=lowerCAmelCase__ ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowerCAmelCase__ ) ) , max_queue_size=100 ) ): di.add(lowerCAmelCase__ , lowerCAmelCase__ ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. return di.get_duplicate_clusters() def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : str ) -> float: __a = get_tokens(lowerCAmelCase__ ) __a = get_tokens(lowerCAmelCase__ ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) lowercase_ = None def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] ) -> Any: __a = [] for elementa in cluster: __a = _shared_dataset[elementa['''base_index''']]['''content'''] for elementa in extremes: __a = _shared_dataset[elementa['''base_index''']]['''content'''] if jaccard_similarity(lowerCAmelCase__ , lowerCAmelCase__ ) >= jaccard_threshold: elementa["copies"] += 1 break else: __a = 1 extremes.append(lowerCAmelCase__ ) return extremes def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] ) -> Optional[int]: global _shared_dataset __a = dataset __a = [] __a = partial(_find_cluster_extremes_shared , jaccard_threshold=lowerCAmelCase__ ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( lowerCAmelCase__ , lowerCAmelCase__ , ) , total=len(lowerCAmelCase__ ) , ): extremes_list.append(lowerCAmelCase__ ) return extremes_list def lowercase ( lowerCAmelCase__ : Type[Dataset] , lowerCAmelCase__ : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]: __a = make_duplicate_clusters(lowerCAmelCase__ , lowerCAmelCase__ ) __a = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster} __a = {} __a = find_extremes(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) for extremes in extremes_clusters: for element in extremes: __a = element __a = duplicate_indices - set(extreme_dict.keys() ) __a = dataset.filter(lambda lowerCAmelCase__ , lowerCAmelCase__ : idx not in remove_indices , with_indices=lowerCAmelCase__ ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: __a = element['''base_index'''] in extreme_dict if element["is_extreme"]: __a = extreme_dict[element['''base_index''']]['''copies'''] print(f'''Original dataset size: {len(lowerCAmelCase__ )}''' ) print(f'''Number of duplicate clusters: {len(lowerCAmelCase__ )}''' ) print(f'''Files in duplicate cluster: {len(lowerCAmelCase__ )}''' ) print(f'''Unique files in duplicate cluster: {len(lowerCAmelCase__ )}''' ) print(f'''Filtered dataset size: {len(lowerCAmelCase__ )}''' ) return ds_filter, duplicate_clusters
45
"""simple docstring""" def lowercase ( lowerCAmelCase__ : str ) -> list: if n_term == "": return [] __a = [] for temp in range(int(lowerCAmelCase__ ) ): series.append(f'''1/{temp + 1}''' if series else '''1''' ) return series if __name__ == "__main__": lowercase_ = input("Enter the last number (nth term) of the Harmonic Series") print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n") print(harmonic_series(nth_term))
45
1
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } lowerCamelCase__ = { '''vocab_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json''' }, '''merges_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt''' }, '''tokenizer_config_file''': { '''facebook/blenderbot_small-90M''': ( '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json''' ) }, } lowerCamelCase__ = {'''facebook/blenderbot_small-90M''': 512} def lowerCAmelCase__ ( a__ ) ->Any: '''simple docstring''' _UpperCamelCase = set() _UpperCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _UpperCamelCase = char _UpperCamelCase = set(a__ ) return pairs class _UpperCAmelCase ( lowerCAmelCase ): '''simple docstring''' __A = VOCAB_FILES_NAMES __A = PRETRAINED_VOCAB_FILES_MAP __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A = ['''input_ids''', '''attention_mask'''] def __init__( self : str , lowercase_ : Any , lowercase_ : int , lowercase_ : List[Any]="__start__" , lowercase_ : Optional[int]="__end__" , lowercase_ : List[Any]="__unk__" , lowercase_ : List[str]="__null__" , **lowercase_ : Optional[int] , ) -> List[Any]: """simple docstring""" super().__init__(unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , pad_token=lowercase_ , **lowercase_) with open(lowercase_ , encoding="utf-8") as vocab_handle: _UpperCamelCase = json.load(lowercase_) _UpperCamelCase = {v: k for k, v in self.encoder.items()} with open(lowercase_ , encoding="utf-8") as merges_handle: _UpperCamelCase = merges_handle.read().split("\n")[1:-1] _UpperCamelCase = [tuple(merge.split()) for merge in merges] _UpperCamelCase = dict(zip(lowercase_ , range(len(lowercase_)))) _UpperCamelCase = {} @property def __UpperCAmelCase ( self : List[str]) -> int: """simple docstring""" return len(self.encoder) def __UpperCAmelCase ( self : Tuple) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder) def __UpperCAmelCase ( self : Tuple , lowercase_ : str) -> str: """simple docstring""" if token in self.cache: return self.cache[token] _UpperCamelCase = re.sub("([.,!?()])" , R" \1" , lowercase_) _UpperCamelCase = re.sub("(')" , R" \1 " , lowercase_) _UpperCamelCase = re.sub(R"\s{2,}" , " " , lowercase_) if "\n" in token: _UpperCamelCase = token.replace("\n" , " __newln__") _UpperCamelCase = token.split(" ") _UpperCamelCase = [] for token in tokens: if not len(lowercase_): continue _UpperCamelCase = token.lower() _UpperCamelCase = tuple(lowercase_) _UpperCamelCase = tuple(list(word[:-1]) + [word[-1] + "</w>"]) _UpperCamelCase = get_pairs(lowercase_) if not pairs: words.append(lowercase_) continue while True: _UpperCamelCase = min(lowercase_ , key=lambda lowercase_: self.bpe_ranks.get(lowercase_ , float("inf"))) if bigram not in self.bpe_ranks: break _UpperCamelCase , _UpperCamelCase = bigram _UpperCamelCase = [] _UpperCamelCase = 0 while i < len(lowercase_): try: _UpperCamelCase = word.index(lowercase_ , lowercase_) new_word.extend(word[i:j]) _UpperCamelCase = j except ValueError: new_word.extend(word[i:]) break if word[i] == first and i < len(lowercase_) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 _UpperCamelCase = tuple(lowercase_) _UpperCamelCase = new_word if len(lowercase_) == 1: break else: _UpperCamelCase = get_pairs(lowercase_) _UpperCamelCase = "@@ ".join(lowercase_) _UpperCamelCase = word[:-4] _UpperCamelCase = word words.append(lowercase_) return " ".join(lowercase_) def __UpperCAmelCase ( self : Optional[int] , lowercase_ : str) -> List[str]: """simple docstring""" _UpperCamelCase = [] _UpperCamelCase = re.findall(R"\S+\n?" , lowercase_) for token in words: split_tokens.extend(list(self.bpe(lowercase_).split(" "))) return split_tokens def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : str) -> int: """simple docstring""" _UpperCamelCase = token.lower() return self.encoder.get(lowercase_ , self.encoder.get(self.unk_token)) def __UpperCAmelCase ( self : Any , lowercase_ : int) -> str: """simple docstring""" return self.decoder.get(lowercase_ , self.unk_token) def __UpperCAmelCase ( self : Any , lowercase_ : List[str]) -> str: """simple docstring""" _UpperCamelCase = " ".join(lowercase_).replace("@@ " , "").strip() return out_string def __UpperCAmelCase ( self : str , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]: """simple docstring""" if not os.path.isdir(lowercase_): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return _UpperCamelCase = os.path.join( lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) _UpperCamelCase = os.path.join( lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]) with open(lowercase_ , "w" , encoding="utf-8") as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_) + "\n") _UpperCamelCase = 0 with open(lowercase_ , "w" , encoding="utf-8") as writer: writer.write("#version: 0.2\n") for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowercase_: kv[1]): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' " Please check that the tokenizer is not corrupted!") _UpperCamelCase = token_index writer.write(" ".join(lowercase_) + "\n") index += 1 return vocab_file, merge_file
361
from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class _UpperCAmelCase : '''simple docstring''' def __init__( self : Optional[Any] , lowercase_ : Optional[Any] , ) -> Optional[Any]: """simple docstring""" _UpperCamelCase = parent _UpperCamelCase = 13 _UpperCamelCase = 7 _UpperCamelCase = 30 _UpperCamelCase = self.seq_length + self.mem_len _UpperCamelCase = 15 _UpperCamelCase = True _UpperCamelCase = True _UpperCamelCase = 99 _UpperCamelCase = [10, 50, 80] _UpperCamelCase = 32 _UpperCamelCase = 32 _UpperCamelCase = 4 _UpperCamelCase = 8 _UpperCamelCase = 128 _UpperCamelCase = 2 _UpperCamelCase = 2 _UpperCamelCase = None _UpperCamelCase = 1 _UpperCamelCase = 0 _UpperCamelCase = 3 _UpperCamelCase = self.vocab_size - 1 _UpperCamelCase = 0.01 def __UpperCAmelCase ( self : Dict) -> Optional[int]: """simple docstring""" _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCamelCase = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def __UpperCAmelCase ( self : Union[str, Any]) -> Tuple: """simple docstring""" random.seed(self.seed) tf.random.set_seed(self.seed) def __UpperCAmelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Optional[Any]) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = TFTransfoXLModel(lowercase_) _UpperCamelCase , _UpperCamelCase = model(lowercase_).to_tuple() _UpperCamelCase = {"input_ids": input_ids_a, "mems": mems_a} _UpperCamelCase , _UpperCamelCase = model(lowercase_).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def __UpperCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : str , lowercase_ : Dict , lowercase_ : List[Any]) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = TFTransfoXLLMHeadModel(lowercase_) _UpperCamelCase , _UpperCamelCase = model(lowercase_).to_tuple() _UpperCamelCase = {"input_ids": input_ids_a, "labels": lm_labels} _UpperCamelCase , _UpperCamelCase = model(lowercase_).to_tuple() _UpperCamelCase , _UpperCamelCase = model([input_ids_a, mems_a]).to_tuple() _UpperCamelCase = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels} _UpperCamelCase , _UpperCamelCase = model(lowercase_).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Dict) -> str: """simple docstring""" _UpperCamelCase = TFTransfoXLForSequenceClassification(lowercase_) _UpperCamelCase = model(lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def __UpperCAmelCase ( self : Dict) -> List[Any]: """simple docstring""" _UpperCamelCase = self.prepare_config_and_inputs() ((_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase)) = config_and_inputs _UpperCamelCase = {"input_ids": input_ids_a} return config, inputs_dict @require_tf class _UpperCAmelCase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ): '''simple docstring''' __A = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) __A = () if is_tf_available() else () __A = ( { '''feature-extraction''': TFTransfoXLModel, '''text-classification''': TFTransfoXLForSequenceClassification, '''text-generation''': TFTransfoXLLMHeadModel, '''zero-shot''': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented __A = False __A = False __A = False __A = False def __UpperCAmelCase ( self : List[Any] , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : Any , lowercase_ : List[str]) -> Any: """simple docstring""" if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. return True return False def __UpperCAmelCase ( self : Optional[Any]) -> int: """simple docstring""" _UpperCamelCase = TFTransfoXLModelTester(self) _UpperCamelCase = ConfigTester(self , config_class=lowercase_ , d_embed=37) def __UpperCAmelCase ( self : Dict) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def __UpperCAmelCase ( self : Union[str, Any]) -> List[str]: """simple docstring""" self.model_tester.set_seed() _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*lowercase_) def __UpperCAmelCase ( self : Optional[Any]) -> List[Any]: """simple docstring""" self.model_tester.set_seed() _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*lowercase_) def __UpperCAmelCase ( self : List[str]) -> List[Any]: """simple docstring""" _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowercase_) def __UpperCAmelCase ( self : Dict) -> int: """simple docstring""" _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: _UpperCamelCase = model_class(lowercase_) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer) if model_class in list_other_models_with_output_ebd: _UpperCamelCase = model.get_output_embeddings() assert isinstance(lowercase_ , tf.keras.layers.Layer) _UpperCamelCase = model.get_bias() assert name is None else: _UpperCamelCase = model.get_output_embeddings() assert x is None _UpperCamelCase = model.get_bias() assert name is None def __UpperCAmelCase ( self : Optional[int]) -> Any: """simple docstring""" pass @slow def __UpperCAmelCase ( self : List[str]) -> Tuple: """simple docstring""" for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = TFTransfoXLModel.from_pretrained(lowercase_) self.assertIsNotNone(lowercase_) @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.") def __UpperCAmelCase ( self : Union[str, Any]) -> Tuple: """simple docstring""" pass @require_tf class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @unittest.skip("Skip test until #12651 is resolved.") @slow def __UpperCAmelCase ( self : Optional[Any]) -> Dict: """simple docstring""" _UpperCamelCase = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103") # fmt: off _UpperCamelCase = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off _UpperCamelCase = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> _UpperCamelCase = model.generate(lowercase_ , max_length=200 , do_sample=lowercase_) self.assertListEqual(output_ids[0].numpy().tolist() , lowercase_)
63
0
import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class __lowerCamelCase (_a ): def __init__( self: Optional[Any],*A_: Any,A_: Union[str, Any]=None,A_: Tuple=None,**A_: Dict ): '''simple docstring''' super().__init__(*A_,**A_ ) __UpperCamelCase = eval_examples __UpperCamelCase = post_process_function def snake_case_ ( self: Optional[int],A_: Union[str, Any]=None,A_: List[str]=None,A_: Optional[Any]=None,A_: str = "eval" ): '''simple docstring''' __UpperCamelCase = self.eval_dataset if eval_dataset is None else eval_dataset __UpperCamelCase = self.get_eval_dataloader(A_ ) __UpperCamelCase = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __UpperCamelCase = self.compute_metrics __UpperCamelCase = None __UpperCamelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop __UpperCamelCase = time.time() try: __UpperCamelCase = eval_loop( A_,description='Evaluation',prediction_loss_only=True if compute_metrics is None else None,ignore_keys=A_,metric_key_prefix=A_,) finally: __UpperCamelCase = compute_metrics __UpperCamelCase = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( A_,A_,num_samples=output.num_samples,num_steps=math.ceil(output.num_samples / total_batch_size ),) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default __UpperCamelCase = self.post_process_function(A_,A_,output.predictions ) __UpperCamelCase = self.compute_metrics(A_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): __UpperCamelCase = metrics.pop(A_ ) metrics.update(output.metrics ) else: __UpperCamelCase = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(A_ ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) __UpperCamelCase = self.callback_handler.on_evaluate(self.args,self.state,self.control,A_ ) return metrics def snake_case_ ( self: int,A_: Optional[Any],A_: int,A_: str=None,A_: str = "test" ): '''simple docstring''' __UpperCamelCase = self.get_test_dataloader(A_ ) # Temporarily disable metric computation, we will do it in the loop here. __UpperCamelCase = self.compute_metrics __UpperCamelCase = None __UpperCamelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop __UpperCamelCase = time.time() try: __UpperCamelCase = eval_loop( A_,description='Prediction',prediction_loss_only=True if compute_metrics is None else None,ignore_keys=A_,metric_key_prefix=A_,) finally: __UpperCamelCase = compute_metrics __UpperCamelCase = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( A_,A_,num_samples=output.num_samples,num_steps=math.ceil(output.num_samples / total_batch_size ),) ) if self.post_process_function is None or self.compute_metrics is None: return output __UpperCamelCase = self.post_process_function(A_,A_,output.predictions,'predict' ) __UpperCamelCase = self.compute_metrics(A_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): __UpperCamelCase = metrics.pop(A_ ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions,label_ids=predictions.label_ids,metrics=A_ )
310
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=True , _lowercase="pt" ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = {'add_prefix_space': True} if isinstance(_lowercase , _lowercase ) and not line.startswith(' ' ) else {} __UpperCamelCase = padding_side return tokenizer( [line] , max_length=_lowercase , padding='max_length' if pad_to_max_length else None , truncation=_lowercase , return_tensors=_lowercase , add_special_tokens=_lowercase , **_lowercase , ) def _A ( _lowercase , _lowercase , _lowercase=None , ) -> List[Any]: """simple docstring""" __UpperCamelCase = input_ids.ne(_lowercase ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __lowerCamelCase (_a ): def __init__( self: List[str],A_: str,A_: List[str],A_: List[str],A_: List[str],A_: Tuple="train",A_: Any=None,A_: List[str]=None,A_: List[Any]=None,A_: int="",): '''simple docstring''' super().__init__() __UpperCamelCase = Path(A_ ).joinpath(type_path + '.source' ) __UpperCamelCase = Path(A_ ).joinpath(type_path + '.target' ) __UpperCamelCase = self.get_char_lens(self.src_file ) __UpperCamelCase = max_source_length __UpperCamelCase = max_target_length assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}''' __UpperCamelCase = tokenizer __UpperCamelCase = prefix if n_obs is not None: __UpperCamelCase = self.src_lens[:n_obs] __UpperCamelCase = src_lang __UpperCamelCase = tgt_lang def __len__( self: Optional[Any] ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self: int,A_: Optional[Any] ): '''simple docstring''' __UpperCamelCase = index + 1 # linecache starts at 1 __UpperCamelCase = self.prefix + linecache.getline(str(self.src_file ),A_ ).rstrip('\n' ) __UpperCamelCase = linecache.getline(str(self.tgt_file ),A_ ).rstrip('\n' ) assert source_line, F'''empty source line for index {index}''' assert tgt_line, F'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer,A_ ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right __UpperCamelCase = ( self.tokenizer.question_encoder if isinstance(self.tokenizer,A_ ) else self.tokenizer ) __UpperCamelCase = self.tokenizer.generator if isinstance(self.tokenizer,A_ ) else self.tokenizer __UpperCamelCase = encode_line(A_,A_,self.max_source_length,'right' ) __UpperCamelCase = encode_line(A_,A_,self.max_target_length,'right' ) __UpperCamelCase = source_inputs['input_ids'].squeeze() __UpperCamelCase = target_inputs['input_ids'].squeeze() __UpperCamelCase = source_inputs['attention_mask'].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def snake_case_ ( A_: List[Any] ): '''simple docstring''' return [len(A_ ) for x in Path(A_ ).open().readlines()] def snake_case_ ( self: Union[str, Any],A_: Any ): '''simple docstring''' __UpperCamelCase = torch.stack([x['input_ids'] for x in batch] ) __UpperCamelCase = torch.stack([x['attention_mask'] for x in batch] ) __UpperCamelCase = torch.stack([x['decoder_input_ids'] for x in batch] ) __UpperCamelCase = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer,A_ ) else self.tokenizer.pad_token_id ) __UpperCamelCase = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer,A_ ) else self.tokenizer.pad_token_id ) __UpperCamelCase = trim_batch(A_,A_ ) __UpperCamelCase, __UpperCamelCase = trim_batch(A_,A_,attention_mask=A_ ) __UpperCamelCase = { 'input_ids': source_ids, 'attention_mask': source_mask, 'decoder_input_ids': y, } return batch __snake_case = getLogger(__name__) def _A ( _lowercase ) -> Any: """simple docstring""" return list(itertools.chain.from_iterable(_lowercase ) ) def _A ( _lowercase ) -> None: """simple docstring""" __UpperCamelCase = get_git_info() save_json(_lowercase , os.path.join(_lowercase , 'git_log.json' ) ) def _A ( _lowercase , _lowercase , _lowercase=4 , **_lowercase ) -> List[Any]: """simple docstring""" with open(_lowercase , 'w' ) as f: json.dump(_lowercase , _lowercase , indent=_lowercase , **_lowercase ) def _A ( _lowercase ) -> Union[str, Any]: """simple docstring""" with open(_lowercase ) as f: return json.load(_lowercase ) def _A ( ) -> Dict: """simple docstring""" __UpperCamelCase = git.Repo(search_parent_directories=_lowercase ) __UpperCamelCase = { 'repo_id': str(_lowercase ), 'repo_sha': str(repo.head.object.hexsha ), 'repo_branch': str(repo.active_branch ), 'hostname': str(socket.gethostname() ), } return repo_infos def _A ( _lowercase , _lowercase ) -> List: """simple docstring""" return list(map(_lowercase , _lowercase ) ) def _A ( _lowercase , _lowercase ) -> Tuple: """simple docstring""" with open(_lowercase , 'wb' ) as f: return pickle.dump(_lowercase , _lowercase ) def _A ( _lowercase ) -> List[Any]: """simple docstring""" def remove_articles(_lowercase ): return re.sub(r'\b(a|an|the)\b' , ' ' , _lowercase ) def white_space_fix(_lowercase ): return " ".join(text.split() ) def remove_punc(_lowercase ): __UpperCamelCase = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_lowercase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_lowercase ) ) ) ) def _A ( _lowercase , _lowercase ) -> int: """simple docstring""" __UpperCamelCase = normalize_answer(_lowercase ).split() __UpperCamelCase = normalize_answer(_lowercase ).split() __UpperCamelCase = Counter(_lowercase ) & Counter(_lowercase ) __UpperCamelCase = sum(common.values() ) if num_same == 0: return 0 __UpperCamelCase = 1.0 * num_same / len(_lowercase ) __UpperCamelCase = 1.0 * num_same / len(_lowercase ) __UpperCamelCase = (2 * precision * recall) / (precision + recall) return fa def _A ( _lowercase , _lowercase ) -> Any: """simple docstring""" return normalize_answer(_lowercase ) == normalize_answer(_lowercase ) def _A ( _lowercase , _lowercase ) -> Dict: """simple docstring""" assert len(_lowercase ) == len(_lowercase ) __UpperCamelCase = 0 for hypo, pred in zip(_lowercase , _lowercase ): em += exact_match_score(_lowercase , _lowercase ) if len(_lowercase ) > 0: em /= len(_lowercase ) return {"em": em} def _A ( _lowercase ) -> Optional[Any]: """simple docstring""" return model_prefix.startswith('rag' ) def _A ( _lowercase , _lowercase , _lowercase ) -> Dict: """simple docstring""" __UpperCamelCase = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead __UpperCamelCase = 'dropout_rate' for p in extra_params: if getattr(_lowercase , _lowercase , _lowercase ): if not hasattr(_lowercase , _lowercase ) and not hasattr(_lowercase , equivalent_param[p] ): logger.info('config doesn\'t have a `{}` attribute'.format(_lowercase ) ) delattr(_lowercase , _lowercase ) continue __UpperCamelCase = p if hasattr(_lowercase , _lowercase ) else equivalent_param[p] setattr(_lowercase , _lowercase , getattr(_lowercase , _lowercase ) ) delattr(_lowercase , _lowercase ) return hparams, config
310
1
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: __lowerCamelCase : str = None __lowerCamelCase : Tuple = logging.get_logger(__name__) __lowerCamelCase : int = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} __lowerCamelCase : str = { """vocab_file""": { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""", """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model""" ), }, """tokenizer_file""": { """google/bigbird-roberta-base""": ( """https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json""" ), """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json""" ), }, } __lowerCamelCase : Union[str, Any] = { """google/bigbird-roberta-base""": 4096, """google/bigbird-roberta-large""": 4096, """google/bigbird-base-trivia-itc""": 4096, } __lowerCamelCase : List[Any] = """▁""" class A__ ( __snake_case ): _UpperCAmelCase :Any = VOCAB_FILES_NAMES _UpperCAmelCase :Optional[Any] = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase :Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase :Optional[int] = BigBirdTokenizer _UpperCAmelCase :Dict = ['input_ids', 'attention_mask'] _UpperCAmelCase :List[int] = [] def __init__( self , A_=None , A_=None , A_="<unk>" , A_="<s>" , A_="</s>" , A_="<pad>" , A_="[SEP]" , A_="[MASK]" , A_="[CLS]" , **A_ , ): '''simple docstring''' UpperCamelCase : Union[str, Any] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token UpperCamelCase : Tuple = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token UpperCamelCase : Optional[int] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token UpperCamelCase : Optional[int] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token UpperCamelCase : int = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token UpperCamelCase : int = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token # Mask token behave like a normal word, i.e. include the space before it UpperCamelCase : List[str] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token super().__init__( A_ , tokenizer_file=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , **A_ , ) UpperCamelCase : Optional[Any] = vocab_file UpperCamelCase : List[str] = False if not self.vocab_file else True def __UpperCamelCase( self , A_ , A_ = None ): '''simple docstring''' UpperCamelCase : int = [self.sep_token_id] UpperCamelCase : int = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __UpperCamelCase( self , A_ , A_ = None , A_ = False ): '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1] def __UpperCamelCase( self , A_ , A_ = None ): '''simple docstring''' UpperCamelCase : Optional[int] = [self.sep_token_id] UpperCamelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCamelCase( self , A_ , A_ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(A_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCamelCase : Optional[int] = os.path.join( A_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ): copyfile(self.vocab_file , A_ ) return (out_vocab_file,)
140
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A__ ( unittest.TestCase ): def __init__( self , A_ , A_=13 , A_=3 , A_=224 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , ): '''simple docstring''' UpperCamelCase : Any = size if size is not None else {"height": 18, "width": 18} UpperCamelCase : Tuple = parent UpperCamelCase : Tuple = batch_size UpperCamelCase : Tuple = num_channels UpperCamelCase : str = image_size UpperCamelCase : Optional[int] = min_resolution UpperCamelCase : List[Any] = max_resolution UpperCamelCase : Union[str, Any] = do_resize UpperCamelCase : str = size UpperCamelCase : List[str] = do_normalize UpperCamelCase : Any = image_mean UpperCamelCase : int = image_std def __UpperCamelCase( self ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :Tuple = ViTImageProcessor if is_vision_available() else None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = EfficientFormerImageProcessorTester(self ) @property def __UpperCamelCase( self ): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ , "image_mean" ) ) self.assertTrue(hasattr(A_ , "image_std" ) ) self.assertTrue(hasattr(A_ , "do_normalize" ) ) self.assertTrue(hasattr(A_ , "do_resize" ) ) self.assertTrue(hasattr(A_ , "size" ) ) def __UpperCamelCase( self ): '''simple docstring''' pass def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=A_ ) for image in image_inputs: self.assertIsInstance(A_ , Image.Image ) # Test not batched input UpperCamelCase : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched UpperCamelCase : Tuple = image_processor(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=A_ , numpify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , np.ndarray ) # Test not batched input UpperCamelCase : Dict = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched UpperCamelCase : Optional[Any] = image_processor(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=A_ , torchify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , torch.Tensor ) # Test not batched input UpperCamelCase : Any = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched UpperCamelCase : Any = image_processor(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , )
140
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCamelCase ={ "configuration_nllb_moe": [ "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP", "NllbMoeConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase =[ "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST", "NllbMoeForConditionalGeneration", "NllbMoeModel", "NllbMoePreTrainedModel", "NllbMoeTop2Router", "NllbMoeSparseMLP", ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys _lowerCamelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
334
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase_ = {"configuration_mmbt": ["MMBTConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
211
0
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) lowerCAmelCase_ = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('''input_proj.weight''', '''input_projection.weight'''), ('''input_proj.bias''', '''input_projection.bias'''), ('''query_embed.weight''', '''query_position_embeddings.weight'''), ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''), ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''), ('''class_embed.weight''', '''class_labels_classifier.weight'''), ('''class_embed.bias''', '''class_labels_classifier.bias'''), ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''), ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''), ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''), ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''), ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''), ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''), ('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''), ('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''), ('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''), ('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''), ('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''), ('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''), ('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''), ('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''), ('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''), ('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''), ] ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict: """simple docstring""" snake_case_ : Optional[int] = state_dict.pop(_UpperCamelCase ) snake_case_ : Any = val def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]: """simple docstring""" snake_case_ : Tuple = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: snake_case_ : Optional[int] = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' ) snake_case_ : List[Any] = value else: snake_case_ : List[Any] = value return new_state_dict def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=False ) -> Dict: """simple docstring""" snake_case_ : Optional[Any] = '''''' if is_panoptic: snake_case_ : Tuple = '''conditional_detr.''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) snake_case_ : Dict = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) snake_case_ : Tuple = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case_ : int = in_proj_weight[:256, :] snake_case_ : List[str] = in_proj_bias[:256] snake_case_ : Tuple = in_proj_weight[256:512, :] snake_case_ : Dict = in_proj_bias[256:512] snake_case_ : Optional[int] = in_proj_weight[-256:, :] snake_case_ : Optional[Any] = in_proj_bias[-256:] def lowerCamelCase_ ( ) -> Tuple: """simple docstring""" snake_case_ : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' snake_case_ : Dict = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im @torch.no_grad() def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int: """simple docstring""" snake_case_ : Optional[int] = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: snake_case_ : List[str] = '''resnet101''' if "dc5" in model_name: snake_case_ : Dict = True snake_case_ : Optional[Any] = '''panoptic''' in model_name if is_panoptic: snake_case_ : List[str] = 250 else: snake_case_ : Union[str, Any] = 91 snake_case_ : Dict = '''huggingface/label-files''' snake_case_ : Optional[Any] = '''coco-detection-id2label.json''' snake_case_ : str = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) ) snake_case_ : int = {int(_UpperCamelCase ): v for k, v in idalabel.items()} snake_case_ : Tuple = idalabel snake_case_ : Any = {v: k for k, v in idalabel.items()} # load image processor snake_case_ : List[str] = '''coco_panoptic''' if is_panoptic else '''coco_detection''' snake_case_ : Union[str, Any] = ConditionalDetrImageProcessor(format=_UpperCamelCase ) # prepare image snake_case_ : Union[str, Any] = prepare_img() snake_case_ : Tuple = image_processor(images=_UpperCamelCase , return_tensors='''pt''' ) snake_case_ : Any = encoding['''pixel_values'''] logger.info(f'''Converting model {model_name}...''' ) # load original model from torch hub snake_case_ : Union[str, Any] = torch.hub.load('''DeppMeng/ConditionalDETR''' , _UpperCamelCase , pretrained=_UpperCamelCase ).eval() snake_case_ : str = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: snake_case_ : Optional[Any] = '''conditional_detr.''' + src rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) snake_case_ : Optional[Any] = rename_backbone_keys(_UpperCamelCase ) # query, key and value matrices need special treatment read_in_q_k_v(_UpperCamelCase , is_panoptic=_UpperCamelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them snake_case_ : Optional[int] = '''conditional_detr.model.''' if is_panoptic else '''model.''' for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith('''conditional_detr''' ) and not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ) ): snake_case_ : str = state_dict.pop(_UpperCamelCase ) snake_case_ : Union[str, Any] = val elif "class_labels_classifier" in key or "bbox_predictor" in key: snake_case_ : Optional[Any] = state_dict.pop(_UpperCamelCase ) snake_case_ : Union[str, Any] = val elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ): continue else: snake_case_ : Any = state_dict.pop(_UpperCamelCase ) snake_case_ : List[str] = val else: if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ): snake_case_ : Any = state_dict.pop(_UpperCamelCase ) snake_case_ : Tuple = val # finally, create HuggingFace model and load state dict snake_case_ : int = ConditionalDetrForSegmentation(_UpperCamelCase ) if is_panoptic else ConditionalDetrForObjectDetection(_UpperCamelCase ) model.load_state_dict(_UpperCamelCase ) model.eval() model.push_to_hub(repo_id=_UpperCamelCase , organization='''DepuMeng''' , commit_message='''Add model''' ) # verify our conversion snake_case_ : int = conditional_detr(_UpperCamelCase ) snake_case_ : Optional[int] = model(_UpperCamelCase ) assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 ) # Save model and image processor logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase ) model.save_pretrained(_UpperCamelCase ) image_processor.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''conditional_detr_resnet50''', type=str, help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCAmelCase_ = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
351
import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def lowerCamelCase_ ( _UpperCamelCase ) -> tuple: """simple docstring""" return (data["data"], data["target"]) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> XGBClassifier: """simple docstring""" snake_case_ : Optional[Any] = XGBClassifier() classifier.fit(_UpperCamelCase , _UpperCamelCase ) return classifier def lowerCamelCase_ ( ) -> None: """simple docstring""" snake_case_ : Optional[Any] = load_iris() snake_case_ , snake_case_ : str = data_handling(_UpperCamelCase ) snake_case_ , snake_case_ , snake_case_ , snake_case_ : Dict = train_test_split( _UpperCamelCase , _UpperCamelCase , test_size=0.25 ) snake_case_ : List[str] = iris['''target_names'''] # Create an XGBoost Classifier from the training data snake_case_ : int = xgboost(_UpperCamelCase , _UpperCamelCase ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , display_labels=_UpperCamelCase , cmap='''Blues''' , normalize='''true''' , ) plt.title('''Normalized Confusion Matrix - IRIS Dataset''' ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
279
0
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) _lowerCamelCase : Tuple = [ ['''attention''', '''attn'''], ['''encoder_attention''', '''encoder_attn'''], ['''q_lin''', '''q_proj'''], ['''k_lin''', '''k_proj'''], ['''v_lin''', '''v_proj'''], ['''out_lin''', '''out_proj'''], ['''norm_embeddings''', '''layernorm_embedding'''], ['''position_embeddings''', '''embed_positions'''], ['''embeddings''', '''embed_tokens'''], ['''ffn.lin''', '''fc'''], ] def a_ ( __lowercase : str ) -> str: if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _snake_case = k.replace(__lowercase , __lowercase ) if k.startswith('encoder' ): _snake_case = k.replace('.attn' , '.self_attn' ) _snake_case = k.replace('norm1' , 'self_attn_layer_norm' ) _snake_case = k.replace('norm2' , 'final_layer_norm' ) elif k.startswith('decoder' ): _snake_case = k.replace('norm1' , 'self_attn_layer_norm' ) _snake_case = k.replace('norm2' , 'encoder_attn_layer_norm' ) _snake_case = k.replace('norm3' , 'final_layer_norm' ) return k def a_ ( __lowercase : Optional[int] ) -> List[str]: _snake_case = [ 'model.encoder.layernorm_embedding.weight', 'model.encoder.layernorm_embedding.bias', 'model.decoder.layernorm_embedding.weight', 'model.decoder.layernorm_embedding.bias', ] for k in keys: _snake_case = sd.pop(__lowercase ) _snake_case = k.replace('layernorm_embedding' , 'layer_norm' ) assert new_k not in sd _snake_case = v _lowerCamelCase : Union[str, Any] = ['''START'''] @torch.no_grad() def a_ ( __lowercase : List[Any] , __lowercase : Dict , __lowercase : Tuple ) -> int: _snake_case = torch.load(__lowercase , map_location='cpu' ) _snake_case = model['model'] _snake_case = BlenderbotConfig.from_json_file(__lowercase ) _snake_case = BlenderbotForConditionalGeneration(__lowercase ) _snake_case = m.model.state_dict().keys() _snake_case = [] _snake_case = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue _snake_case = rename_state_dict_key(__lowercase ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _snake_case = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(__lowercase ) m.model.load_state_dict(__lowercase , strict=__lowercase ) m.half() m.save_pretrained(__lowercase ) if __name__ == "__main__": _lowerCamelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''') parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''') parser.add_argument( '''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use''' ) _lowerCamelCase : Dict = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
282
import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class A__ ( unittest.TestCase ): def a__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __lowercase = tempfile.mkdtemp() __lowercase = SamImageProcessor() __lowercase = SamProcessor(_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def a__ ( self : int , **_UpperCAmelCase : Optional[Any] ) -> Tuple: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor def a__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a__ ( self : List[Any] ) -> List[Any]: """simple docstring""" __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __lowercase = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 ) __lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _UpperCAmelCase ) def a__ ( self : int ) -> Tuple: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = self.prepare_image_inputs() __lowercase = image_processor(_UpperCAmelCase , return_tensors='np' ) __lowercase = processor(images=_UpperCAmelCase , return_tensors='np' ) input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes' ) # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) @require_torch def a__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = [torch.ones((1, 3, 5, 5) )] __lowercase = [[17_64, 26_46]] __lowercase = [[6_83, 10_24]] __lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = processor.post_process_masks( _UpperCAmelCase , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) # should also work with np __lowercase = [np.ones((1, 3, 5, 5) )] __lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = [[1, 0], [0, 1]] with self.assertRaises(_UpperCAmelCase ): __lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) ) @require_vision @require_tf class A__ ( unittest.TestCase ): def a__ ( self : Optional[Any] ) -> Any: """simple docstring""" __lowercase = tempfile.mkdtemp() __lowercase = SamImageProcessor() __lowercase = SamProcessor(_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def a__ ( self : str , **_UpperCAmelCase : Tuple ) -> Tuple: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a__ ( self : Tuple ) -> Optional[int]: """simple docstring""" __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def a__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowercase = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 ) __lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _UpperCAmelCase ) def a__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = self.prepare_image_inputs() __lowercase = image_processor(_UpperCAmelCase , return_tensors='np' ) __lowercase = processor(images=_UpperCAmelCase , return_tensors='np' ) input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) @require_tf def a__ ( self : Dict ) -> List[Any]: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = [tf.ones((1, 3, 5, 5) )] __lowercase = [[17_64, 26_46]] __lowercase = [[6_83, 10_24]] __lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = processor.post_process_masks( _UpperCAmelCase , tf.convert_to_tensor(_UpperCAmelCase ) , tf.convert_to_tensor(_UpperCAmelCase ) , return_tensors='tf' , ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) # should also work with np __lowercase = [np.ones((1, 3, 5, 5) )] __lowercase = processor.post_process_masks( _UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): __lowercase = processor.post_process_masks( _UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' ) @require_vision @require_torchvision class A__ ( unittest.TestCase ): def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase = tempfile.mkdtemp() __lowercase = SamImageProcessor() __lowercase = SamProcessor(_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def a__ ( self : Dict , **_UpperCAmelCase : int ) -> Optional[Any]: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor def a__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a__ ( self : List[str] ) -> int: """simple docstring""" __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def a__ ( self : Tuple ) -> str: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa ) __lowercase = [tf.convert_to_tensor(_UpperCAmelCase )] __lowercase = [torch.tensor(_UpperCAmelCase )] __lowercase = [[17_64, 26_46]] __lowercase = [[6_83, 10_24]] __lowercase = processor.post_process_masks( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' ) __lowercase = processor.post_process_masks( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='pt' ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def a__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = self.prepare_image_inputs() __lowercase = image_processor(_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy() __lowercase = processor(images=_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy() __lowercase = image_processor(_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy() __lowercase = processor(images=_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy() self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
325
0
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import KarrasVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : UNetaDModel UpperCAmelCase : KarrasVeScheduler def __init__( self : Any , _UpperCAmelCase : UNetaDModel , _UpperCAmelCase : KarrasVeScheduler ): super().__init__() self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase ) @torch.no_grad() def __call__( self : Any , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 50 , _UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , **_UpperCAmelCase : Optional[int] , ): _A = self.unet.config.sample_size _A = (batch_size, 3, img_size, img_size) _A = self.unet # sample x_0 ~ N(0, sigma_0^2 * I) _A = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device ) * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(_UpperCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # here sigma_t == t_i from the paper _A = self.scheduler.schedule[t] _A = self.scheduler.schedule[t - 1] if t > 0 else 0 # 1. Select temporarily increased noise level sigma_hat # 2. Add new noise to move from sample_i to sample_hat _A , _A = self.scheduler.add_noise_to_input(_UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ) # 3. Predict the noise residual given the noise magnitude `sigma_hat` # The model inputs and output are adjusted by following eq. (213) in [1]. _A = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample # 4. Evaluate dx/dt at sigma_hat # 5. Take Euler step from sigma to sigma_prev _A = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if sigma_prev != 0: # 6. Apply 2nd order correction # The model inputs and output are adjusted by following eq. (213) in [1]. _A = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample _A = self.scheduler.step_correct( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , step_output.prev_sample , step_output['derivative'] , ) _A = step_output.prev_sample _A = (sample / 2 + 0.5).clamp(0 , 1 ) _A = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _A = self.numpy_to_pil(_UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_UpperCAmelCase )
271
"""simple docstring""" from collections import deque class lowercase_ : '''simple docstring''' def __init__( self : int , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : int ): _A = process_name # process name _A = arrival_time # arrival time of the process # completion time of finished process or last interrupted time _A = arrival_time _A = burst_time # remaining burst time _A = 0 # total time of the process wait in ready queue _A = 0 # time from arrival time to completion time class lowercase_ : '''simple docstring''' def __init__( self : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : list[int] , _UpperCAmelCase : deque[Process] , _UpperCAmelCase : int , ): # total number of mlfq's queues _A = number_of_queues # time slice of queues that round robin algorithm applied _A = time_slices # unfinished process is in this ready_queue _A = queue # current time _A = current_time # finished process is in this sequence queue _A = deque() def lowerCAmelCase_ ( self : Dict ): _A = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : list[Process] ): _A = [] for i in range(len(_UpperCAmelCase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : list[Process] ): _A = [] for i in range(len(_UpperCAmelCase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : list[Process] ): _A = [] for i in range(len(_UpperCAmelCase ) ): completion_times.append(queue[i].stop_time ) return completion_times def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : deque[Process] ): return [q.burst_time for q in queue] def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Process ): process.waiting_time += self.current_time - process.stop_time return process.waiting_time def lowerCAmelCase_ ( self : int , _UpperCAmelCase : deque[Process] ): _A = deque() # sequence deque of finished process while len(_UpperCAmelCase ) != 0: _A = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(_UpperCAmelCase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 _A = 0 # set the process's turnaround time because it is finished _A = self.current_time - cp.arrival_time # set the completion time _A = self.current_time # add the process to queue that has finished queue finished.append(_UpperCAmelCase ) self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : deque[Process] , _UpperCAmelCase : int ): _A = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(_UpperCAmelCase ) ): _A = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(_UpperCAmelCase ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time _A = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(_UpperCAmelCase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished _A = 0 # set the finish time _A = self.current_time # update the process' turnaround time because it is finished _A = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(_UpperCAmelCase ) self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def lowerCAmelCase_ ( self : str ): # all queues except last one have round_robin algorithm for i in range(self.number_of_queues - 1 ): _A , _A = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest a = Process('''P1''', 0, 53) a = Process('''P2''', 0, 17) a = Process('''P3''', 0, 68) a = Process('''P4''', 0, 24) a = 3 a = [17, 25] a = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])}) a = Process('''P1''', 0, 53) a = Process('''P2''', 0, 17) a = Process('''P3''', 0, 68) a = Process('''P4''', 0, 24) a = 3 a = [17, 25] a = deque([Pa, Pa, Pa, Pa]) a = MLFQ(number_of_queues, time_slices, queue, 0) a = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( F'''waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print completion times of processes(P1, P2, P3, P4) print( F'''completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print total turnaround times of processes(P1, P2, P3, P4) print( F'''turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print sequence of finished processes print( F'''sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}''' )
271
1
import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowercase: List[Any] = logging.get_logger(__name__) _lowercase: str = '▁' _lowercase: Dict = {'vocab_file': 'vocab.txt', 'sentencepiece_model_ckpt': 'sentencepiece.bpe.model'} _lowercase: str = { 'sentencepiece_model_file': 'sentencepiece.bpe.model', 'vocab_file': 'vocab.txt', } _lowercase: Any = { 'vocab_file': { 'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt', 'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt', }, 'sentencepiece_model_file': { 'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model', 'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model', }, } _lowercase: Any = { 'ernie-m-base': 514, 'ernie-m-large': 514, } _lowercase: Any = { 'ernie-m-base': {'do_lower_case': False}, 'ernie-m-large': {'do_lower_case': False}, } class _lowercase ( lowerCamelCase_ ): """simple docstring""" __A = ["input_ids"] __A = VOCAB_FILES_NAMES __A = PRETRAINED_INIT_CONFIGURATION __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A = PRETRAINED_VOCAB_FILES_MAP __A = RESOURCE_FILES_NAMES def __init__(self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=False , lowerCamelCase_="utf8" , lowerCamelCase_="[UNK]" , lowerCamelCase_="[SEP]" , lowerCamelCase_="[PAD]" , lowerCamelCase_="[CLS]" , lowerCamelCase_="[MASK]" , lowerCamelCase_ = None , **lowerCamelCase_ , ): """simple docstring""" a = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , vocab_file=__a , encoding=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , ) a = do_lower_case a = sentencepiece_model_ckpt a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__a ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: a = self.load_vocab(filepath=__a ) else: a = {self.sp_model.id_to_piece(__a ): id for id in range(self.sp_model.get_piece_size() )} a = {v: k for k, v in self.vocab.items()} def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" if text is None: return None a = self.tokenize(__a ) a , a = "", [] for i, ch in enumerate(__a ): if ch in self.SP_CHAR_MAPPING: a = self.SP_CHAR_MAPPING.get(__a ) else: a = unicodedata.normalize("NFKC" , __a ) if self.is_whitespace(__a ): continue normalized_text += ch char_mapping.extend([i] * len(__a ) ) a , a , a = normalized_text, [], 0 if self.do_lower_case: a = text.lower() for token in split_tokens: if token[:1] == "▁": a = token[1:] a = text[offset:].index(__a ) + offset a = start + len(__a ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) a = end return token_mapping @property def UpperCamelCase_ (self ): """simple docstring""" return len(self.vocab ) def UpperCamelCase_ (self ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__(self ): """simple docstring""" a = self.__dict__.copy() a = None return state def __setstate__(self , lowerCamelCase_ ): """simple docstring""" a = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): a = {} a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" return "".join((self.SP_CHAR_MAPPING.get(__a , __a ) for c in text) ) def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=64 , lowerCamelCase_=0.1 ): """simple docstring""" if self.sp_model_kwargs.get("enable_sampling" ) is True: a = True if self.sp_model_kwargs.get("alpha" ) is not None: a = self.sp_model_kwargs.get("alpha" ) if self.sp_model_kwargs.get("nbest_size" ) is not None: a = self.sp_model_kwargs.get("nbest_size" ) if not enable_sampling: a = self.sp_model.EncodeAsPieces(__a ) else: a = self.sp_model.SampleEncodeAsPieces(__a , __a , __a ) a = [] for pi, piece in enumerate(__a ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(__a ) and pi != 0: new_pieces.append(__a ) continue else: continue a = 0 for i, chunk in enumerate(__a ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(__a ) or self.is_punct(__a ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(__a ) a = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) a = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) a = i if len(__a ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" a = "".join(__a ).replace(__a , " " ).strip() return out_string def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" a = self.convert_ids_to_tokens(__a ) a = "".join(__a ).replace(__a , " " ).strip() return out_string def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" return self.vocab.get(__a , self.vocab.get(self.unk_token ) ) def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" return self.reverse_vocab.get(__a , self.unk_token ) def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] a = [self.cls_token_id] a = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=None ): """simple docstring""" if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1] return [1] + ([0] * len(__a )) + [1] def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None ): """simple docstring""" if token_ids_a is None: # [CLS] X [SEP] return (len(__a ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(__a ) + 1) + [1] * (len(__a ) + 3) def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" if "\u4e00" <= char <= "\u9fff": return True return False def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" if char in ",;:.?!~,;:。?!《》【】": return True return False def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(__a ) == 1: a = unicodedata.category(__a ) if cat == "Zs": return True return False def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" a = {} with io.open(__a , "r" , encoding="utf-8" ) as f: for index, line in enumerate(__a ): a = line.rstrip("\n" ) a = int(__a ) return token_to_idx def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None ): """simple docstring""" a = 0 if os.path.isdir(__a ): a = os.path.join( __a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: a = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(__a , "w" , encoding="utf-8" ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda lowerCamelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' " Please check that the vocabulary is not corrupted!" ) a = token_index writer.write(token + "\n" ) index += 1 a = os.path.join(__a , "sentencepiece.bpe.model" ) with open(__a , "wb" ) as fi: a = self.sp_model.serialized_model_proto() fi.write(__a ) return (vocab_file,)
227
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCAmelCase_ : Dict = logging.get_logger(__name__) lowerCAmelCase_ : Optional[int] = { 'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json', } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='deta' __a ={ 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : List[str] , __a : List[str]=None , __a : Dict=9_00 , __a : str=20_48 , __a : Tuple=6 , __a : List[str]=20_48 , __a : str=8 , __a : Union[str, Any]=6 , __a : int=10_24 , __a : List[Any]=8 , __a : Dict=0.0 , __a : Tuple=True , __a : Optional[Any]="relu" , __a : Tuple=2_56 , __a : Optional[Any]=0.1 , __a : int=0.0 , __a : List[Any]=0.0 , __a : Optional[int]=0.02 , __a : str=1.0 , __a : Dict=True , __a : Dict=False , __a : Optional[int]="sine" , __a : Any=5 , __a : List[str]=4 , __a : Optional[int]=4 , __a : List[str]=True , __a : str=3_00 , __a : int=True , __a : int=True , __a : Tuple=1 , __a : Optional[int]=5 , __a : Tuple=2 , __a : Dict=1 , __a : Optional[int]=1 , __a : Any=5 , __a : Optional[int]=2 , __a : Dict=0.1 , __a : str=0.25 , **__a : Tuple , ): if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) _a = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] ) else: if isinstance(__a , __a ): _a = backbone_config.pop("model_type" ) _a = CONFIG_MAPPING[backbone_model_type] _a = config_class.from_dict(__a ) _a = backbone_config _a = num_queries _a = max_position_embeddings _a = d_model _a = encoder_ffn_dim _a = encoder_layers _a = encoder_attention_heads _a = decoder_ffn_dim _a = decoder_layers _a = decoder_attention_heads _a = dropout _a = attention_dropout _a = activation_dropout _a = activation_function _a = init_std _a = init_xavier_std _a = encoder_layerdrop _a = auxiliary_loss _a = position_embedding_type # deformable attributes _a = num_feature_levels _a = encoder_n_points _a = decoder_n_points _a = two_stage _a = two_stage_num_proposals _a = with_box_refine _a = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("If two_stage is True, with_box_refine must be True." ) # Hungarian matcher _a = class_cost _a = bbox_cost _a = giou_cost # Loss coefficients _a = mask_loss_coefficient _a = dice_loss_coefficient _a = bbox_loss_coefficient _a = giou_loss_coefficient _a = eos_coefficient _a = focal_alpha super().__init__(is_encoder_decoder=__a , **__a ) @property def UpperCamelCase__ ( self : Optional[Any] ): return self.encoder_attention_heads @property def UpperCamelCase__ ( self : Dict ): return self.d_model def UpperCamelCase__ ( self : List[str] ): _a = copy.deepcopy(self.__dict__ ) _a = self.backbone_config.to_dict() _a = self.__class__.model_type return output
63
0
'''simple docstring''' import re def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase : str = re.compile( R"^(?:0|94|\+94|0{2}94)" R"7(0|1|2|4|5|6|7|8)" R"(-| |)" R"\d{7}$" ) return bool(re.search(__lowerCAmelCase , __lowerCAmelCase ) ) if __name__ == "__main__": lowerCamelCase__ = '0094702343221' print(is_sri_lankan_phone_number(phone))
370
'''simple docstring''' import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser lowerCamelCase__ = logging.getLogger(__name__) torch.set_grad_enabled(False) lowerCamelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu' def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=100 , __lowerCAmelCase=" " ): _UpperCAmelCase : Any = text.split(__lowerCAmelCase ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )] def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase , _UpperCAmelCase : Dict = [], [] for title, text in zip(documents["title"] , documents["text"] ): if text is not None: for passage in split_text(__lowerCAmelCase ): titles.append(title if title is not None else "" ) texts.append(__lowerCAmelCase ) return {"title": titles, "text": texts} def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase : str = ctx_tokenizer( documents["title"] , documents["text"] , truncation=__lowerCAmelCase , padding="longest" , return_tensors="pt" )["input_ids"] _UpperCAmelCase : str = ctx_encoder(input_ids.to(device=__lowerCAmelCase ) , return_dict=__lowerCAmelCase ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ): ###################################### logger.info("Step 1 - Create the dataset" ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way _UpperCAmelCase : Optional[int] = load_dataset( "csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words _UpperCAmelCase : Optional[int] = dataset.map(__lowerCAmelCase , batched=__lowerCAmelCase , num_proc=processing_args.num_proc ) # And compute the embeddings _UpperCAmelCase : Union[str, Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__lowerCAmelCase ) _UpperCAmelCase : Optional[int] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) _UpperCAmelCase : Dict = Features( {"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} ) # optional, save as float32 instead of float64 to save space _UpperCAmelCase : int = dataset.map( partial(__lowerCAmelCase , ctx_encoder=__lowerCAmelCase , ctx_tokenizer=__lowerCAmelCase ) , batched=__lowerCAmelCase , batch_size=processing_args.batch_size , features=__lowerCAmelCase , ) # And finally save your dataset _UpperCAmelCase : List[Any] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset" ) dataset.save_to_disk(__lowerCAmelCase ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info("Step 2 - Index the dataset" ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search _UpperCAmelCase : Any = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index("embeddings" , custom_index=__lowerCAmelCase ) # And save the index _UpperCAmelCase : List[str] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" ) dataset.get_index("embeddings" ).save(__lowerCAmelCase ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class lowerCAmelCase__ : lowerCAmelCase : str = field( default=str(Path(UpperCAmelCase__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , ) lowerCAmelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , ) lowerCAmelCase : str = field( default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , ) lowerCAmelCase : str = field( default="facebook/dpr-ctx_encoder-multiset-base" , metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) } , ) lowerCAmelCase : Optional[str] = field( default=str(Path(UpperCAmelCase__ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , ) @dataclass class lowerCAmelCase__ : lowerCAmelCase : Optional[int] = field( default=UpperCAmelCase__ , metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." } , ) lowerCAmelCase : int = field( default=16 , metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." } , ) @dataclass class lowerCAmelCase__ : lowerCAmelCase : int = field( default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , ) lowerCAmelCase : int = field( default=128 , metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) lowerCamelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: lowerCamelCase__ = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
322
0
import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch _UpperCAmelCase = """sshleifer/bart-tiny-random""" _UpperCAmelCase = """patrickvonplaten/t5-tiny-random""" @require_torch class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self ): """simple docstring""" return AutoConfig.from_pretrained(lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ , *A_ : Tuple = create_student_by_copying_alternating_layers(lowercase , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ , *A_ : int = create_student_by_copying_alternating_layers(lowercase , tempfile.mkdtemp() , e=1 , d=lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ , *A_ : str = create_student_by_copying_alternating_layers(lowercase , tempfile.mkdtemp() , e=1 , d=lowercase ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ , *A_ : List[str] = create_student_by_copying_alternating_layers(lowercase , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def lowerCAmelCase_ ( self ): """simple docstring""" with self.assertRaises(lowercase ): create_student_by_copying_alternating_layers(lowercase , tempfile.mkdtemp() , e=lowercase , d=lowercase )
140
def UpperCamelCase ( __lowercase : list[list[int]] ,__lowercase : int ,__lowercase : int ,__lowercase : list[int] ): '''simple docstring''' if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def UpperCamelCase ( __lowercase : list[list[int]] ,__lowercase : list[int] ,__lowercase : int ): '''simple docstring''' if curr_ind == len(__lowercase ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 ,len(__lowercase ) ): if valid_connection(__lowercase ,__lowercase ,__lowercase ,__lowercase ): # Insert current vertex into path as next transition A_ : Tuple = next_ver # Validate created path if util_hamilton_cycle(__lowercase ,__lowercase ,curr_ind + 1 ): return True # Backtrack A_ : Tuple = -1 return False def UpperCamelCase ( __lowercase : list[list[int]] ,__lowercase : int = 0 ): '''simple docstring''' A_ : Any = [-1] * (len(__lowercase ) + 1) # initialize start and end of path with starting index A_ : Dict = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(__lowercase ,__lowercase ,1 ) else []
140
1
import unittest from transformers import AutoTokenizer, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow if is_flax_available(): import jax.numpy as jnp from transformers import FlaxXLMRobertaModel @require_sentencepiece @require_tokenizers @require_flax class lowerCAmelCase ( unittest.TestCase ): @slow def A_ ( self : Any ) -> int: lowerCamelCase__ : int = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' ) lowerCamelCase__ : Optional[int] = AutoTokenizer.from_pretrained('xlm-roberta-base' ) lowerCamelCase__ : Any = 'The dog is cute and lives in the garden house' lowerCamelCase__ : Union[str, Any] = jnp.array([tokenizer.encode(UpperCAmelCase )] ) lowerCamelCase__ : Optional[int] = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim lowerCamelCase__ : str = jnp.array( [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] ) lowerCamelCase__ : Union[str, Any] = model(UpperCAmelCase )['last_hidden_state'] self.assertEqual(output.shape , UpperCAmelCase ) # compare the actual values for a slice of last dim self.assertTrue(jnp.allclose(output[:, :, -1] , UpperCAmelCase , atol=1e-3 ) )
362
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase : List[str] = logging.get_logger() @dataclass class lowerCAmelCase : UpperCAmelCase__ = 42 UpperCAmelCase__ = field(default_factory=__UpperCamelCase ) UpperCAmelCase__ = field(default_factory=__UpperCamelCase ) def A_ ( self : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tensor , UpperCAmelCase : Tensor ) -> Any: lowerCamelCase__ : List[str] = len(list(m.modules() ) ) == 1 or isinstance(UpperCAmelCase , nn.Convad ) or isinstance(UpperCAmelCase , nn.BatchNormad ) if has_not_submodules: self.traced.append(UpperCAmelCase ) def __call__( self : Any , UpperCAmelCase : Tensor ) -> Dict: for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(UpperCAmelCase ) [x.remove() for x in self.handles] return self @property def A_ ( self : List[str] ) -> int: # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda UpperCAmelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowerCAmelCase : UpperCAmelCase__ = 42 UpperCAmelCase__ = 42 UpperCAmelCase__ = 0 UpperCAmelCase__ = field(default_factory=__UpperCamelCase ) UpperCAmelCase__ = field(default_factory=__UpperCamelCase ) def __call__( self : Any , UpperCAmelCase : Tensor ) -> int: lowerCamelCase__ : Union[str, Any] = Tracker(self.dest )(UpperCAmelCase ).parametrized lowerCamelCase__ : List[Any] = Tracker(self.src )(UpperCAmelCase ).parametrized lowerCamelCase__ : Any = list(filter(lambda UpperCAmelCase : type(UpperCAmelCase ) not in self.src_skip , UpperCAmelCase ) ) lowerCamelCase__ : int = list(filter(lambda UpperCAmelCase : type(UpperCAmelCase ) not in self.dest_skip , UpperCAmelCase ) ) if len(UpperCAmelCase ) != len(UpperCAmelCase ): raise Exception( F"""Numbers of operations are different. Source module has {len(UpperCAmelCase )} operations while""" F""" destination module has {len(UpperCAmelCase )}.""" ) for dest_m, src_m in zip(UpperCAmelCase , UpperCAmelCase ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F"""Transfered from={src_m} to={dest_m}""" ) def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True ) -> Any: print(F"""Converting {name}...""" ) with torch.no_grad(): lowerCamelCase__ : int = timm.create_model(_UpperCAmelCase , pretrained=_UpperCAmelCase ).eval() lowerCamelCase__ : Union[str, Any] = ResNetForImageClassification(_UpperCAmelCase ).eval() lowerCamelCase__ : str = ModuleTransfer(src=_UpperCAmelCase , dest=_UpperCAmelCase ) lowerCamelCase__ : Optional[int] = torch.randn((1, 3, 224, 224) ) module_transfer(_UpperCAmelCase ) assert torch.allclose(from_model(_UpperCAmelCase ) , our_model(_UpperCAmelCase ).logits ), "The model logits don't match the original one." lowerCamelCase__ : Union[str, Any] = F"""resnet{"-".join(name.split("resnet" ) )}""" print(_UpperCAmelCase ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=_UpperCAmelCase , ) # we can use the convnext one lowerCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=_UpperCAmelCase , ) print(F"""Pushed {checkpoint_name}""" ) def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = True ) -> List[str]: lowerCamelCase__ : Dict = 'imagenet-1k-id2label.json' lowerCamelCase__ : Optional[int] = 1000 lowerCamelCase__ : int = (1, num_labels) lowerCamelCase__ : Any = 'huggingface/label-files' lowerCamelCase__ : str = num_labels lowerCamelCase__ : Any = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset' ) , 'r' ) ) lowerCamelCase__ : Any = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} lowerCamelCase__ : str = idalabel lowerCamelCase__ : Any = {v: k for k, v in idalabel.items()} lowerCamelCase__ : Tuple = partial(_UpperCAmelCase , num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase ) lowerCamelCase__ : Optional[Any] = { 'resnet18': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet26': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet34': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet50': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet101': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet152': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), } if model_name: convert_weight_and_push(_UpperCAmelCase , names_to_config[model_name] , _UpperCAmelCase , _UpperCAmelCase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return config, expected_shape if __name__ == "__main__": _UpperCAmelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) _UpperCAmelCase : str = parser.parse_args() _UpperCAmelCase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
45
0
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class A__ ( _a , _a , unittest.TestCase ): __UpperCamelCase : Any = IFInpaintingSuperResolutionPipeline __UpperCamelCase : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} __UpperCamelCase : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} ) __UpperCamelCase : Any = PipelineTesterMixin.required_optional_params - {'''latents'''} def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' return self._get_superresolution_dummy_components() def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Tuple=0 ) -> List[str]: '''simple docstring''' if str(SCREAMING_SNAKE_CASE ).startswith("""mps""" ): _a : Dict =torch.manual_seed(SCREAMING_SNAKE_CASE ) else: _a : List[str] =torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(SCREAMING_SNAKE_CASE ) ).to(SCREAMING_SNAKE_CASE ) _a : str =floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(SCREAMING_SNAKE_CASE ) ).to(SCREAMING_SNAKE_CASE ) _a : Tuple =floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(SCREAMING_SNAKE_CASE ) ).to(SCREAMING_SNAKE_CASE ) _a : Optional[Any] ={ '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __UpperCAmelCase ( self :Optional[Any] ) -> List[str]: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def __UpperCAmelCase ( self :Optional[int] ) -> List[Any]: '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" ) def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1e-1 ) def __UpperCAmelCase ( self :Dict ) -> Optional[Any]: '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def __UpperCAmelCase ( self :int ) -> Dict: '''simple docstring''' self._test_save_load_local() def __UpperCAmelCase ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
276
import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available lowerCAmelCase_ = logging.getLogger(__name__) @dataclass class __lowerCAmelCase : lowerCamelCase_ : str lowerCamelCase_ : List[str] lowerCamelCase_ : Optional[List[str]] @dataclass class __lowerCAmelCase : lowerCamelCase_ : List[int] lowerCamelCase_ : List[int] lowerCamelCase_ : Optional[List[int]] = None lowerCamelCase_ : Optional[List[int]] = None class __lowerCAmelCase ( _a ): lowerCamelCase_ : str = '''train''' lowerCamelCase_ : List[str] = '''dev''' lowerCamelCase_ : List[Any] = '''test''' class __lowerCAmelCase : @staticmethod def lowerCamelCase (__magic_name__ , __magic_name__ ) -> List[InputExample]: '''simple docstring''' raise NotImplementedError @staticmethod def lowerCamelCase (__magic_name__ ) -> List[str]: '''simple docstring''' raise NotImplementedError @staticmethod def lowerCamelCase (__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False , __magic_name__="[CLS]" , __magic_name__=1 , __magic_name__="[SEP]" , __magic_name__=False , __magic_name__=False , __magic_name__=0 , __magic_name__=0 , __magic_name__=-100 , __magic_name__=0 , __magic_name__=True , ) -> List[InputFeatures]: '''simple docstring''' snake_case_ : Optional[int] = {label: i for i, label in enumerate(__magic_name__ )} snake_case_ : Dict = [] for ex_index, example in enumerate(__magic_name__ ): if ex_index % 1_0000 == 0: logger.info('''Writing example %d of %d''' , __magic_name__ , len(__magic_name__ ) ) snake_case_ : List[str] = [] snake_case_ : List[str] = [] for word, label in zip(example.words , example.labels ): snake_case_ : Optional[Any] = tokenizer.tokenize(__magic_name__ ) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. if len(__magic_name__ ) > 0: tokens.extend(__magic_name__ ) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(__magic_name__ ) - 1) ) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. snake_case_ : Union[str, Any] = tokenizer.num_special_tokens_to_add() if len(__magic_name__ ) > max_seq_length - special_tokens_count: snake_case_ : str = tokens[: (max_seq_length - special_tokens_count)] snake_case_ : Any = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] snake_case_ : Union[str, Any] = [sequence_a_segment_id] * len(__magic_name__ ) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: snake_case_ : Union[str, Any] = [cls_token] + tokens snake_case_ : List[Any] = [pad_token_label_id] + label_ids snake_case_ : Optional[Any] = [cls_token_segment_id] + segment_ids snake_case_ : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ ) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. snake_case_ : int = [1 if mask_padding_with_zero else 0] * len(__magic_name__ ) # Zero-pad up to the sequence length. snake_case_ : Optional[int] = max_seq_length - len(__magic_name__ ) if pad_on_left: snake_case_ : Optional[Any] = ([pad_token] * padding_length) + input_ids snake_case_ : Optional[int] = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask snake_case_ : Optional[Any] = ([pad_token_segment_id] * padding_length) + segment_ids snake_case_ : Dict = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(__magic_name__ ) == max_seq_length assert len(__magic_name__ ) == max_seq_length assert len(__magic_name__ ) == max_seq_length assert len(__magic_name__ ) == max_seq_length if ex_index < 5: logger.info('''*** Example ***''' ) logger.info('''guid: %s''' , example.guid ) logger.info('''tokens: %s''' , ''' '''.join([str(__magic_name__ ) for x in tokens] ) ) logger.info('''input_ids: %s''' , ''' '''.join([str(__magic_name__ ) for x in input_ids] ) ) logger.info('''input_mask: %s''' , ''' '''.join([str(__magic_name__ ) for x in input_mask] ) ) logger.info('''segment_ids: %s''' , ''' '''.join([str(__magic_name__ ) for x in segment_ids] ) ) logger.info('''label_ids: %s''' , ''' '''.join([str(__magic_name__ ) for x in label_ids] ) ) if "token_type_ids" not in tokenizer.model_input_names: snake_case_ : int = None features.append( InputFeatures( input_ids=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , label_ids=__magic_name__ ) ) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class __lowerCAmelCase ( _a ): lowerCamelCase_ : List[InputFeatures] lowerCamelCase_ : int = nn.CrossEntropyLoss().ignore_index def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__=False , __magic_name__ = Split.train , ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[str] = os.path.join( __magic_name__ , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(__magic_name__ ) ) , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. snake_case_ : Dict = cached_features_file + '''.lock''' with FileLock(__magic_name__ ): if os.path.exists(__magic_name__ ) and not overwrite_cache: logger.info(F'''Loading features from cached file {cached_features_file}''' ) snake_case_ : Dict = torch.load(__magic_name__ ) else: logger.info(F'''Creating features from dataset file at {data_dir}''' ) snake_case_ : Any = token_classification_task.read_examples_from_file(__magic_name__ , __magic_name__ ) # TODO clean up all this to leverage built-in features of tokenizers snake_case_ : int = token_classification_task.convert_examples_to_features( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__magic_name__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info(F'''Saving features into cached file {cached_features_file}''' ) torch.save(self.features , __magic_name__ ) def __len__(self ) -> Optional[Any]: '''simple docstring''' return len(self.features ) def __getitem__(self , __magic_name__ ) -> InputFeatures: '''simple docstring''' return self.features[i] if is_tf_available(): import tensorflow as tf class __lowerCAmelCase : lowerCamelCase_ : List[InputFeatures] lowerCamelCase_ : int = -100 def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__=False , __magic_name__ = Split.train , ) -> Optional[int]: '''simple docstring''' snake_case_ : Optional[int] = token_classification_task.read_examples_from_file(__magic_name__ , __magic_name__ ) # TODO clean up all this to leverage built-in features of tokenizers snake_case_ : int = token_classification_task.convert_examples_to_features( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__magic_name__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: snake_case_ : Optional[Any] = tf.data.Dataset.from_generator( __magic_name__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , ( {'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )}, tf.TensorShape([None] ), ) , ) else: snake_case_ : int = tf.data.Dataset.from_generator( __magic_name__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , ( { '''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] ), '''token_type_ids''': tf.TensorShape([None] ), }, tf.TensorShape([None] ), ) , ) def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' snake_case_ : Optional[Any] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) ) return self.dataset def __len__(self ) -> str: '''simple docstring''' return len(self.features ) def __getitem__(self , __magic_name__ ) -> InputFeatures: '''simple docstring''' return self.features[i]
279
0
from __future__ import annotations from math import pow, sqrt def _UpperCamelCase ( UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : int ) -> Union[str, Any]: """simple docstring""" if (resistance, reactance, impedance).count(0 ) != 1: raise ValueError('One and only one argument must be 0' ) if resistance == 0: return {"resistance": sqrt(pow(UpperCamelCase_ , 2 ) - pow(UpperCamelCase_ , 2 ) )} elif reactance == 0: return {"reactance": sqrt(pow(UpperCamelCase_ , 2 ) - pow(UpperCamelCase_ , 2 ) )} elif impedance == 0: return {"impedance": sqrt(pow(UpperCamelCase_ , 2 ) + pow(UpperCamelCase_ , 2 ) )} else: raise ValueError('Exactly one argument must be 0' ) if __name__ == "__main__": import doctest doctest.testmod()
371
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __snake_case : Union[str, Any] = { """configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""], """tokenization_convbert""": ["""ConvBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : List[str] = ["""ConvBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Optional[Any] = [ """CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ConvBertForMaskedLM""", """ConvBertForMultipleChoice""", """ConvBertForQuestionAnswering""", """ConvBertForSequenceClassification""", """ConvBertForTokenClassification""", """ConvBertLayer""", """ConvBertModel""", """ConvBertPreTrainedModel""", """load_tf_weights_in_convbert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Union[str, Any] = [ """TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFConvBertForMaskedLM""", """TFConvBertForMultipleChoice""", """TFConvBertForQuestionAnswering""", """TFConvBertForSequenceClassification""", """TFConvBertForTokenClassification""", """TFConvBertLayer""", """TFConvBertModel""", """TFConvBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys __snake_case : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
122
0
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class UpperCAmelCase__ : """simple docstring""" def __init__( self : Any ,_a : List[Any] ,_a : List[str]=sys.maxsize ): '''simple docstring''' _a : List[str] = 'bilinear' _a : List[Any] = max_size _a : Dict = short_edge_length def __call__( self : int ,_a : List[Any] ): '''simple docstring''' _a : List[str] = [] for img in imgs: _a, _a : Union[str, Any] = img.shape[:2] # later: provide list and randomly choose index for resize _a : Optional[int] = np.random.randint(self.short_edge_length[0] ,self.short_edge_length[1] + 1 ) if size == 0: return img _a : Any = size * 1.0 / min(_a ,_a ) if h < w: _a, _a : Optional[Any] = size, scale * w else: _a, _a : Union[str, Any] = scale * h, size if max(_a ,_a ) > self.max_size: _a : Any = self.max_size * 1.0 / max(_a ,_a ) _a : Any = newh * scale _a : Dict = neww * scale _a : str = int(neww + 0.5 ) _a : Optional[Any] = int(newh + 0.5 ) if img.dtype == np.uinta: _a : Any = Image.fromarray(_a ) _a : List[Any] = pil_image.resize((neww, newh) ,PILImageResampling.BILINEAR ) _a : Tuple = np.asarray(_a ) else: _a : Dict = img.permute(2 ,0 ,1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw _a : Any = nn.functional.interpolate( _a ,(newh, neww) ,mode=self.interp_method ,align_corners=_a ).squeeze(0 ) img_augs.append(_a ) return img_augs class UpperCAmelCase__ : """simple docstring""" def __init__( self : str ,_a : Union[str, Any] ): '''simple docstring''' _a : Dict = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] ,cfg.INPUT.MAX_SIZE_TEST ) _a : Any = cfg.INPUT.FORMAT _a : List[Any] = cfg.SIZE_DIVISIBILITY _a : str = cfg.PAD_VALUE _a : List[str] = cfg.INPUT.MAX_SIZE_TEST _a : Union[str, Any] = cfg.MODEL.DEVICE _a : Optional[Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) ,1 ,1 ) _a : Optional[int] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) ,1 ,1 ) _a : Optional[Any] = lambda _a : (x - self.pixel_mean) / self.pixel_std def __lowercase ( self : int ,_a : Optional[Any] ): '''simple docstring''' _a : List[Any] = tuple(max(_a ) for s in zip(*[img.shape for img in images] ) ) _a : List[Any] = [im.shape[-2:] for im in images] _a : Dict = [ nn.functional.pad( _a ,[0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] ,value=self.pad_value ,) for size, im in zip(_a ,_a ) ] return torch.stack(_a ), torch.tensor(_a ) def __call__( self : List[Any] ,_a : Union[str, Any] ,_a : Union[str, Any]=False ): '''simple docstring''' with torch.no_grad(): if not isinstance(_a ,_a ): _a : Optional[Any] = [images] if single_image: assert len(_a ) == 1 for i in range(len(_a ) ): if isinstance(images[i] ,torch.Tensor ): images.insert(_a ,images.pop(_a ).to(self.device ).float() ) elif not isinstance(images[i] ,torch.Tensor ): images.insert( _a ,torch.as_tensor(img_tensorize(images.pop(_a ) ,input_format=self.input_format ) ) .to(self.device ) .float() ,) # resize smallest edge _a : List[Any] = torch.tensor([im.shape[:2] for im in images] ) _a : Any = self.aug(_a ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic _a : Any = [self.normalizer(_a ) for x in images] # now pad them to do the following operations _a, _a : Any = self.pad(_a ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad _a : List[str] = torch.true_divide(_a ,_a ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def UpperCAmelCase_ (__a : Tuple , __a : List[Any] ): """simple docstring""" boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def UpperCAmelCase_ (__a : Any , __a : Tuple[int, int] ): """simple docstring""" assert torch.isfinite(__a ).all(), "Box tensor contains infinite or NaN!" _a, _a : List[str] = box_size tensor[:, 0].clamp_(min=0 , max=__a ) tensor[:, 1].clamp_(min=0 , max=__a ) tensor[:, 2].clamp_(min=0 , max=__a ) tensor[:, 3].clamp_(min=0 , max=__a )
271
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class UpperCAmelCase__ : """simple docstring""" def __init__( self : int ,_a : Any ,_a : Optional[int]=2 ,_a : Optional[Any]=True ,_a : Dict=False ,_a : Dict=10 ,_a : Any=3 ,_a : str=32 * 8 ,_a : Optional[int]=32 * 8 ,_a : int=4 ,_a : str=64 ,): '''simple docstring''' _a : Dict = parent _a : Union[str, Any] = batch_size _a : Tuple = is_training _a : List[str] = use_auxiliary_loss _a : Optional[Any] = num_queries _a : str = num_channels _a : List[str] = min_size _a : int = max_size _a : Optional[int] = num_labels _a : List[str] = hidden_dim _a : int = hidden_dim def __lowercase ( self : Union[str, Any] ): '''simple docstring''' _a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( _a ) _a : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=_a ) _a : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=_a ) > 0.5 ).float() _a : Tuple = (torch.rand((self.batch_size, self.num_labels) ,device=_a ) > 0.5).long() _a : Dict = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def __lowercase ( self : Union[str, Any] ): '''simple docstring''' _a : int = MaskaFormerConfig( hidden_size=self.hidden_dim ,) _a : str = self.num_queries _a : Union[str, Any] = self.num_labels _a : Tuple = [1, 1, 1, 1] _a : Dict = self.num_channels _a : str = 64 _a : Tuple = 128 _a : Optional[Any] = self.hidden_dim _a : Union[str, Any] = self.hidden_dim _a : List[Any] = self.hidden_dim return config def __lowercase ( self : Optional[Any] ): '''simple docstring''' _a, _a, _a, _a, _a : Optional[Any] = self.prepare_config_and_inputs() _a : str = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def __lowercase ( self : List[str] ,_a : Optional[Any] ,_a : str ): '''simple docstring''' _a : str = output.encoder_hidden_states _a : Any = output.pixel_decoder_hidden_states _a : Optional[Any] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(_a ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_a ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_a ) ,config.decoder_layers ) def __lowercase ( self : List[str] ,_a : str ,_a : List[Any] ,_a : Any ,_a : Union[str, Any]=False ): '''simple docstring''' with torch.no_grad(): _a : str = MaskaFormerModel(config=_a ) model.to(_a ) model.eval() _a : Any = model(pixel_values=_a ,pixel_mask=_a ) _a : Optional[Any] = model(_a ,output_hidden_states=_a ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(_a ,_a ) def __lowercase ( self : Tuple ,_a : List[Any] ,_a : Union[str, Any] ,_a : Tuple ,_a : List[str] ,_a : Any ): '''simple docstring''' _a : int = MaskaFormerForUniversalSegmentation(config=_a ) model.to(_a ) model.eval() def comm_check_on_output(_a : Any ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _a : Any = model(pixel_values=_a ,pixel_mask=_a ) _a : Optional[int] = model(_a ) comm_check_on_output(_a ) _a : List[str] = model( pixel_values=_a ,pixel_mask=_a ,mask_labels=_a ,class_labels=_a ) comm_check_on_output(_a ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) ) @require_torch class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () __UpperCAmelCase : Dict = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} __UpperCAmelCase : Dict = False __UpperCAmelCase : Tuple = False __UpperCAmelCase : Dict = False __UpperCAmelCase : List[Any] = False def __lowercase ( self : Optional[int] ): '''simple docstring''' _a : Union[str, Any] = MaskaFormerModelTester(self ) _a : Dict = ConfigTester(self ,config_class=_a ,has_text_modality=_a ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def __lowercase ( self : Optional[int] ): '''simple docstring''' _a, _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(_a ,**_a ,output_hidden_states=_a ) def __lowercase ( self : str ): '''simple docstring''' _a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_a ) @unittest.skip(reason='Mask2Former does not use inputs_embeds' ) def __lowercase ( self : Any ): '''simple docstring''' pass @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' ) def __lowercase ( self : str ): '''simple docstring''' pass @unittest.skip(reason='Mask2Former is not a generative model' ) def __lowercase ( self : List[Any] ): '''simple docstring''' pass @unittest.skip(reason='Mask2Former does not use token embeddings' ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip( reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def __lowercase ( self : Dict ): '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def __lowercase ( self : List[Any] ): '''simple docstring''' pass def __lowercase ( self : int ): '''simple docstring''' _a, _a : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : Union[str, Any] = model_class(_a ) _a : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a : Optional[Any] = [*signature.parameters.keys()] _a : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] ,_a ) @slow def __lowercase ( self : List[str] ): '''simple docstring''' for model_name in ["facebook/mask2former-swin-small-coco-instance"]: _a : Dict = MaskaFormerModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def __lowercase ( self : List[Any] ): '''simple docstring''' _a : int = (self.model_tester.min_size,) * 2 _a : Any = { 'pixel_values': torch.randn((2, 3, *size) ,device=_a ), 'mask_labels': torch.randn((2, 10, *size) ,device=_a ), 'class_labels': torch.zeros(2 ,10 ,device=_a ).long(), } _a : List[Any] = self.model_tester.get_config() _a : int = MaskaFormerForUniversalSegmentation(_a ).to(_a ) _a : str = model(**_a ) self.assertTrue(outputs.loss is not None ) def __lowercase ( self : List[str] ): '''simple docstring''' _a, _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(_a ,**_a ,output_hidden_states=_a ) def __lowercase ( self : int ): '''simple docstring''' _a, _a : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : Any = model_class(_a ).to(_a ) _a : Optional[int] = model(**_a ,output_attentions=_a ) self.assertTrue(outputs.attentions is not None ) def __lowercase ( self : Tuple ): '''simple docstring''' if not self.model_tester.is_training: return _a : List[str] = self.all_model_classes[1] _a, _a, _a, _a, _a : List[str] = self.model_tester.prepare_config_and_inputs() _a : Any = model_class(_a ) model.to(_a ) model.train() _a : Union[str, Any] = model(_a ,mask_labels=_a ,class_labels=_a ).loss loss.backward() def __lowercase ( self : int ): '''simple docstring''' _a : int = self.all_model_classes[1] _a, _a, _a, _a, _a : List[Any] = self.model_tester.prepare_config_and_inputs() _a : str = True _a : str = True _a : List[str] = model_class(_a ).to(_a ) model.train() _a : Optional[int] = model(_a ,mask_labels=_a ,class_labels=_a ) _a : Tuple = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _a : str = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() _a : Dict = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _a : List[str] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=_a ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __lowerCAmelCase = 1e-4 def UpperCAmelCase_ (): """simple docstring""" _a : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @slow class UpperCAmelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return "facebook/mask2former-swin-small-coco-instance" @cached_property def __lowercase ( self : Any ): '''simple docstring''' return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def __lowercase ( self : Any ): '''simple docstring''' _a : List[str] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_a ) _a : int = self.default_image_processor _a : Tuple = prepare_img() _a : Any = image_processor(_a ,return_tensors='pt' ).to(_a ) _a : Union[str, Any] = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_a ,(1, 3, 384, 384) ) with torch.no_grad(): _a : Optional[Any] = model(**_a ) _a : List[Any] = torch.tensor( [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_a ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) ) _a : str = torch.tensor( [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_a ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) ) _a : Any = torch.tensor( [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_a ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,_a ,atol=_a ) ) def __lowercase ( self : Tuple ): '''simple docstring''' _a : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval() _a : Optional[Any] = self.default_image_processor _a : List[Any] = prepare_img() _a : str = image_processor(_a ,return_tensors='pt' ).to(_a ) _a : Any = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_a ,(1, 3, 384, 384) ) with torch.no_grad(): _a : Optional[int] = model(**_a ) # masks_queries_logits _a : Dict = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) _a : Dict = [ [-8.7839, -9.0056, -8.8121], [-7.4104, -7.0313, -6.5401], [-6.6105, -6.3427, -6.4675], ] _a : Optional[Any] = torch.tensor(_a ).to(_a ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,_a ,atol=_a ) ) # class_queries_logits _a : str = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) ) _a : str = torch.tensor( [ [1.8324, -8.0835, -4.1922], [0.8450, -9.0050, -3.6053], [0.3045, -7.7293, -3.0275], ] ).to(_a ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,_a ,atol=_a ) ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' _a : Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval() _a : Tuple = self.default_image_processor _a : Tuple = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,) _a : str = inputs['pixel_values'].to(_a ) _a : str = [el.to(_a ) for el in inputs['mask_labels']] _a : Dict = [el.to(_a ) for el in inputs['class_labels']] with torch.no_grad(): _a : List[str] = model(**_a ) self.assertTrue(outputs.loss is not None )
271
1
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class _UpperCamelCase : '''simple docstring''' def __init__( self : int , _lowerCAmelCase : int , _lowerCAmelCase : Any=1_3 , _lowerCAmelCase : str=3_0 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Optional[Any]=3 , _lowerCAmelCase : Dict=True , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Union[str, Any]=3_2 , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : Optional[Any]=4 , _lowerCAmelCase : Union[str, Any]=3_7 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : List[str]=0.1 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Optional[Any]=1_0 , _lowerCAmelCase : Any=0.02 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Union[str, Any]=2 , ): '''simple docstring''' __lowercase =parent __lowercase =batch_size __lowercase =image_size __lowercase =patch_size __lowercase =num_channels __lowercase =is_training __lowercase =use_labels __lowercase =hidden_size __lowercase =num_hidden_layers __lowercase =num_attention_heads __lowercase =intermediate_size __lowercase =hidden_act __lowercase =hidden_dropout_prob __lowercase =attention_probs_dropout_prob __lowercase =type_sequence_label_size __lowercase =initializer_range __lowercase =scope __lowercase =encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) __lowercase =(image_size // patch_size) ** 2 __lowercase =num_patches + 2 def __lowerCamelCase ( self : Any): '''simple docstring''' __lowercase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __lowercase =None if self.use_labels: __lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size) __lowercase =self.get_config() return config, pixel_values, labels def __lowerCamelCase ( self : Optional[int]): '''simple docstring''' return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __lowerCamelCase ( self : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int): '''simple docstring''' __lowercase =TFDeiTModel(config=_lowerCAmelCase) __lowercase =model(_lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def __lowerCamelCase ( self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[str]): '''simple docstring''' __lowercase =TFDeiTForMaskedImageModeling(config=_lowerCAmelCase) __lowercase =model(_lowerCAmelCase) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size)) # test greyscale images __lowercase =1 __lowercase =TFDeiTForMaskedImageModeling(_lowerCAmelCase) __lowercase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __lowercase =model(_lowerCAmelCase) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size)) def __lowerCamelCase ( self : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any]): '''simple docstring''' __lowercase =self.type_sequence_label_size __lowercase =TFDeiTForImageClassification(_lowerCAmelCase) __lowercase =model(_lowerCAmelCase , labels=_lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images __lowercase =1 __lowercase =TFDeiTForImageClassification(_lowerCAmelCase) __lowercase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __lowercase =model(_lowerCAmelCase , labels=_lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def __lowerCamelCase ( self : str): '''simple docstring''' __lowercase =self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase =config_and_inputs __lowercase ={'pixel_values': pixel_values} return config, inputs_dict @require_tf class _UpperCamelCase ( A , A , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) lowerCAmelCase__ = ( { """feature-extraction""": TFDeiTModel, """image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def __lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __lowercase =TFDeiTModelTester(self) __lowercase =ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=3_7) def __lowerCamelCase ( self : int): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds') def __lowerCamelCase ( self : List[str]): '''simple docstring''' pass def __lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __lowercase , __lowercase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase =model_class(_lowerCAmelCase) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer)) __lowercase =model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase , tf.keras.layers.Dense)) def __lowerCamelCase ( self : List[str]): '''simple docstring''' __lowercase , __lowercase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase =model_class(_lowerCAmelCase) __lowercase =inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase =[*signature.parameters.keys()] __lowercase =['pixel_values'] self.assertListEqual(arg_names[:1] , _lowerCAmelCase) def __lowerCamelCase ( self : List[str]): '''simple docstring''' __lowercase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase) def __lowerCamelCase ( self : Any): '''simple docstring''' __lowercase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase) def __lowerCamelCase ( self : str): '''simple docstring''' __lowercase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase) def __lowerCamelCase ( self : str , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple=False): '''simple docstring''' __lowercase =super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters: del inputs_dict["labels"] return inputs_dict @slow def __lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase =TFDeiTModel.from_pretrained(_lowerCAmelCase) self.assertIsNotNone(_lowerCAmelCase) def _A ( ): """simple docstring""" __lowercase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def __lowerCamelCase ( self : List[Any]): '''simple docstring''' return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224') if is_vision_available() else None ) @slow def __lowerCamelCase ( self : str): '''simple docstring''' __lowercase =TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224') __lowercase =self.default_image_processor __lowercase =prepare_img() __lowercase =image_processor(images=_lowerCAmelCase , return_tensors='tf') # forward pass __lowercase =model(**_lowerCAmelCase) # verify the logits __lowercase =tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , _lowerCAmelCase) __lowercase =tf.constant([-1.0266, 0.1912, -1.2861]) self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4))
353
'''simple docstring''' from math import factorial def _A ( _lowerCAmelCase = 20 ): """simple docstring""" __lowercase =2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... __lowercase =n // 2 return int(factorial(_lowerCAmelCase ) / (factorial(_lowerCAmelCase ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(20)) else: try: lowerCamelCase = int(sys.argv[1]) print(solution(n)) except ValueError: print("""Invalid entry - please enter a number.""")
48
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer a_ = logging.get_logger(__name__) a_ = {'''vocab_file''': '''vocab.txt'''} a_ = { '''vocab_file''': { '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''', '''YituTech/conv-bert-medium-small''': ( '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt''' ), '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''', } } a_ = { '''YituTech/conv-bert-base''': 512, '''YituTech/conv-bert-medium-small''': 512, '''YituTech/conv-bert-small''': 512, } a_ = { '''YituTech/conv-bert-base''': {'''do_lower_case''': True}, '''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True}, '''YituTech/conv-bert-small''': {'''do_lower_case''': True}, } class lowercase__ ( _UpperCAmelCase ): a_ =VOCAB_FILES_NAMES a_ =PRETRAINED_VOCAB_FILES_MAP a_ =PRETRAINED_INIT_CONFIGURATION a_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ =ConvBertTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , )-> Dict: '''simple docstring''' super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , ) lowerCAmelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , __UpperCAmelCase ) != do_lower_case or normalizer_state.get("strip_accents" , __UpperCAmelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , __UpperCAmelCase ) != tokenize_chinese_chars ): lowerCAmelCase__ = getattr(__UpperCAmelCase , normalizer_state.pop("type" ) ) lowerCAmelCase__ = do_lower_case lowerCAmelCase__ = strip_accents lowerCAmelCase__ = tokenize_chinese_chars lowerCAmelCase__ = normalizer_class(**__UpperCAmelCase ) lowerCAmelCase__ = do_lower_case def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None )-> List[Any]: '''simple docstring''' lowerCAmelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> List[int]: '''simple docstring''' lowerCAmelCase__ = [self.sep_token_id] lowerCAmelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> Tuple[str]: '''simple docstring''' lowerCAmelCase__ = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase ) return tuple(__UpperCAmelCase )
340
a_ = '''0.21.0''' from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
340
1
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : Any ) -> List[str]: """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = [], [] while len(snake_case_ ) > 1: _lowerCAmelCase , _lowerCAmelCase = min(snake_case_ ), max(snake_case_ ) start.append(snake_case_ ) end.append(snake_case_ ) collection.remove(snake_case_ ) collection.remove(snake_case_ ) end.reverse() return start + collection + end if __name__ == "__main__": SCREAMING_SNAKE_CASE : Tuple = input('''Enter numbers separated by a comma:\n''').strip() SCREAMING_SNAKE_CASE : Any = [int(item) for item in user_input.split(''',''')] print(*merge_sort(unsorted), sep=''',''')
370
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __lowerCamelCase ( __lowercase , unittest.TestCase ): __UpperCamelCase = DiTPipeline __UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS __UpperCamelCase = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } __UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS __UpperCamelCase = False def A__ (self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCamelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=lowerCamelCase , ) _lowerCAmelCase = AutoencoderKL() _lowerCAmelCase = DDIMScheduler() _lowerCAmelCase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler} return components def A__ (self , lowerCamelCase , lowerCamelCase=0 ): '''simple docstring''' if str(lowerCamelCase ).startswith("""mps""" ): _lowerCAmelCase = torch.manual_seed(lowerCamelCase ) else: _lowerCAmelCase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) _lowerCAmelCase = { """class_labels""": [1], """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def A__ (self ): '''simple docstring''' _lowerCAmelCase = """cpu""" _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**lowerCamelCase ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) _lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase ) _lowerCAmelCase = pipe(**lowerCamelCase ).images _lowerCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _lowerCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) _lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCamelCase , 1e-3 ) def A__ (self ): '''simple docstring''' self._test_inference_batch_single_identical(relax_max_difference=lowerCamelCase , expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def A__ (self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class __lowerCamelCase ( unittest.TestCase ): def A__ (self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ (self ): '''simple docstring''' _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) _lowerCAmelCase = ["""vase""", """umbrella""", """white shark""", """white wolf"""] _lowerCAmelCase = pipe.get_label_ids(lowerCamelCase ) _lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=40 , output_type="""np""" ).images for word, image in zip(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = load_numpy( f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" ) assert np.abs((expected_image - image).max() ) < 1e-2 def A__ (self ): '''simple docstring''' _lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) _lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) _lowerCAmelCase = ["""vase""", """umbrella"""] _lowerCAmelCase = pipe.get_label_ids(lowerCamelCase ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=25 , output_type="""np""" ).images for word, image in zip(lowerCamelCase , lowerCamelCase ): _lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" f"""/dit/{word}_512.npy""" ) assert np.abs((expected_image - image).max() ) < 1e-1
317
0
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE ): __a = 42 class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): @register_to_config def __init__( self : Union[str, Any] , _lowerCamelCase : Union[str, Any] = 65536 , _lowerCamelCase : List[str] = None , _lowerCamelCase : str = 2 , _lowerCamelCase : Tuple = 2 , _lowerCamelCase : Optional[int] = 0 , _lowerCamelCase : Dict = "fourier" , _lowerCamelCase : Tuple = True , _lowerCamelCase : int = False , _lowerCamelCase : List[str] = 0.0 , _lowerCamelCase : List[Any] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _lowerCamelCase : str = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _lowerCamelCase : List[Any] = "UNetMidBlock1D" , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : List[Any] = (32, 32, 64) , _lowerCamelCase : Union[str, Any] = None , _lowerCamelCase : List[Any] = 8 , _lowerCamelCase : str = 1 , _lowerCamelCase : str = False , ): super().__init__() _snake_case = sample_size # time if time_embedding_type == "fourier": _snake_case = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=_a , log=_a , flip_sin_to_cos=_a ) _snake_case = 2 * block_out_channels[0] elif time_embedding_type == "positional": _snake_case = Timesteps( block_out_channels[0] , flip_sin_to_cos=_a , downscale_freq_shift=_a ) _snake_case = block_out_channels[0] if use_timestep_embedding: _snake_case = block_out_channels[0] * 4 _snake_case = TimestepEmbedding( in_channels=_a , time_embed_dim=_a , act_fn=_a , out_dim=block_out_channels[0] , ) _snake_case = nn.ModuleList([] ) _snake_case = None _snake_case = nn.ModuleList([] ) _snake_case = None # down _snake_case = in_channels for i, down_block_type in enumerate(_a ): _snake_case = output_channel _snake_case = block_out_channels[i] if i == 0: input_channel += extra_in_channels _snake_case = i == len(_a ) - 1 _snake_case = get_down_block( _a , num_layers=_a , in_channels=_a , out_channels=_a , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(_a ) # mid _snake_case = get_mid_block( _a , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_a , add_downsample=_a , ) # up _snake_case = list(reversed(_a ) ) _snake_case = reversed_block_out_channels[0] if out_block_type is None: _snake_case = out_channels else: _snake_case = block_out_channels[0] for i, up_block_type in enumerate(_a ): _snake_case = output_channel _snake_case = ( reversed_block_out_channels[i + 1] if i < len(_a ) - 1 else final_upsample_channels ) _snake_case = i == len(_a ) - 1 _snake_case = get_up_block( _a , num_layers=_a , in_channels=_a , out_channels=_a , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(_a ) _snake_case = output_channel # out _snake_case = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 ) _snake_case = get_out_block( out_block_type=_a , num_groups_out=_a , embed_dim=block_out_channels[0] , out_channels=_a , act_fn=_a , fc_dim=block_out_channels[-1] // 4 , ) def lowercase ( self : Dict , _lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : List[str] = True , ): _snake_case = timestep if not torch.is_tensor(_a ): _snake_case = torch.tensor([timesteps] , dtype=torch.long , device=sample.device ) elif torch.is_tensor(_a ) and len(timesteps.shape ) == 0: _snake_case = timesteps[None].to(sample.device ) _snake_case = self.time_proj(_a ) if self.config.use_timestep_embedding: _snake_case = self.time_mlp(_a ) else: _snake_case = timestep_embed[..., None] _snake_case = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) _snake_case = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down _snake_case = () for downsample_block in self.down_blocks: _snake_case , _snake_case = downsample_block(hidden_states=_a , temb=_a ) down_block_res_samples += res_samples # 3. mid if self.mid_block: _snake_case = self.mid_block(_a , _a ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): _snake_case = down_block_res_samples[-1:] _snake_case = down_block_res_samples[:-1] _snake_case = upsample_block(_a , res_hidden_states_tuple=_a , temb=_a ) # 5. post-process if self.out_block: _snake_case = self.out_block(_a , _a ) if not return_dict: return (sample,) return UNetaDOutput(sample=_a )
288
"""simple docstring""" import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class __lowerCAmelCase : '''simple docstring''' def __init__( self , _a , _a=14 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ): __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_token_type_ids __a = use_input_mask __a = use_labels __a = use_mc_token_ids __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = type_vocab_size __a = type_sequence_label_size __a = initializer_range __a = num_labels __a = num_choices __a = scope __a = self.vocab_size - 1 def __UpperCAmelCase ( self ): __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = None if self.use_input_mask: __a = random_attention_mask([self.batch_size, self.seq_length] ) __a = None if self.use_token_type_ids: __a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __a = None if self.use_mc_token_ids: __a = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) __a = None __a = None __a = None if self.use_labels: __a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __a = ids_tensor([self.batch_size] , self.num_choices ) __a = self.get_config() __a = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def __UpperCAmelCase ( self ): return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , *_a ): __a = CTRLModel(config=_a ) model.to(_a ) model.eval() model(_a , token_type_ids=_a , head_mask=_a ) model(_a , token_type_ids=_a ) __a = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , *_a ): __a = CTRLLMHeadModel(_a ) model.to(_a ) model.eval() __a = model(_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self ): __a = self.prepare_config_and_inputs() ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) = config_and_inputs __a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask} return config, inputs_dict def __UpperCAmelCase ( self , _a , _a , _a , _a , *_a ): __a = self.num_labels __a = CTRLForSequenceClassification(_a ) model.to(_a ) model.eval() __a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a = model(_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : str = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () __UpperCAmelCase : Union[str, Any] = (CTRLLMHeadModel,) if is_torch_available() else () __UpperCAmelCase : Union[str, Any] = ( { 'feature-extraction': CTRLModel, 'text-classification': CTRLForSequenceClassification, 'text-generation': CTRLLMHeadModel, 'zero-shot': CTRLForSequenceClassification, } if is_torch_available() else {} ) __UpperCAmelCase : Optional[Any] = True __UpperCAmelCase : List[Any] = False __UpperCAmelCase : str = False def __UpperCAmelCase ( self , _a , _a , _a , _a , _a ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. return True return False def __UpperCAmelCase ( self ): __a = CTRLModelTester(self ) __a = ConfigTester(self , config_class=_a , n_embd=37 ) def __UpperCAmelCase ( self ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): self.config_tester.run_common_tests() def __UpperCAmelCase ( self ): __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*_a ) def __UpperCAmelCase ( self ): __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_a ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __UpperCAmelCase ( self ): pass @slow def __UpperCAmelCase ( self ): for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a = CTRLModel.from_pretrained(_a ) self.assertIsNotNone(_a ) @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def __UpperCAmelCase ( self ): pass @require_torch class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def __UpperCAmelCase ( self ): __a = CTRLLMHeadModel.from_pretrained('''ctrl''' ) model.to(_a ) __a = torch.tensor( [[11_859, 0, 1_611, 8]] , dtype=torch.long , device=_a ) # Legal the president is __a = [ 11_859, 0, 1_611, 8, 5, 150, 26_449, 2, 19, 348, 469, 3, 2_595, 48, 20_740, 246_533, 246_533, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a __a = model.generate(_a , do_sample=_a ) self.assertListEqual(output_ids[0].tolist() , _a )
45
0
'''simple docstring''' from __future__ import annotations def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : List[str] = 2 A : Dict = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(snake_case__ ) if n > 1: factors.append(snake_case__ ) return factors if __name__ == "__main__": import doctest doctest.testmod()
311
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=sys.maxsize ) -> Union[str, Any]: """simple docstring""" A : Tuple = '''bilinear''' A : Optional[int] = max_size A : Dict = short_edge_length def __call__( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Tuple = [] for img in imgs: A, A : str = img.shape[:2] # later: provide list and randomly choose index for resize A : Union[str, Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img A : int = size * 1.0 / min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if h < w: A, A : Tuple = size, scale * w else: A, A : str = scale * h, size if max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) > self.max_size: A : List[str] = self.max_size * 1.0 / max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Tuple = newh * scale A : int = neww * scale A : List[str] = int(neww + 0.5 ) A : int = int(newh + 0.5 ) if img.dtype == np.uinta: A : Dict = Image.fromarray(SCREAMING_SNAKE_CASE ) A : Optional[Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) A : str = np.asarray(SCREAMING_SNAKE_CASE ) else: A : Dict = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw A : List[Any] = nn.functional.interpolate( SCREAMING_SNAKE_CASE , (newh, neww) , mode=self.interp_method , align_corners=SCREAMING_SNAKE_CASE ).squeeze(0 ) img_augs.append(SCREAMING_SNAKE_CASE ) return img_augs class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) A : str = cfg.INPUT.FORMAT A : int = cfg.SIZE_DIVISIBILITY A : Optional[int] = cfg.PAD_VALUE A : Dict = cfg.INPUT.MAX_SIZE_TEST A : Optional[Any] = cfg.MODEL.DEVICE A : Dict = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) A : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) A : str = lambda SCREAMING_SNAKE_CASE : (x - self.pixel_mean) / self.pixel_std def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : Union[str, Any] = tuple(max(SCREAMING_SNAKE_CASE ) for s in zip(*[img.shape for img in images] ) ) A : List[str] = [im.shape[-2:] for im in images] A : Optional[Any] = [ nn.functional.pad( SCREAMING_SNAKE_CASE , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] return torch.stack(SCREAMING_SNAKE_CASE ), torch.tensor(SCREAMING_SNAKE_CASE ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : str = [images] if single_image: assert len(SCREAMING_SNAKE_CASE ) == 1 for i in range(len(SCREAMING_SNAKE_CASE ) ): if isinstance(images[i] , torch.Tensor ): images.insert(SCREAMING_SNAKE_CASE , images.pop(SCREAMING_SNAKE_CASE ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( SCREAMING_SNAKE_CASE , torch.as_tensor(img_tensorize(images.pop(SCREAMING_SNAKE_CASE ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge A : Tuple = torch.tensor([im.shape[:2] for im in images] ) A : Dict = self.aug(SCREAMING_SNAKE_CASE ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic A : Tuple = [self.normalizer(SCREAMING_SNAKE_CASE ) for x in images] # now pad them to do the following operations A, A : Optional[int] = self.pad(SCREAMING_SNAKE_CASE ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad A : Tuple = torch.true_divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' assert torch.isfinite(snake_case__ ).all(), "Box tensor contains infinite or NaN!" A, A : str = box_size tensor[:, 0].clamp_(min=0 , max=snake_case__ ) tensor[:, 1].clamp_(min=0 , max=snake_case__ ) tensor[:, 2].clamp_(min=0 , max=snake_case__ ) tensor[:, 3].clamp_(min=0 , max=snake_case__ )
311
1
import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class UpperCAmelCase__ ( unittest.TestCase ): """simple docstring""" def _a ( self ) -> Any: __UpperCamelCase ='hf-internal-testing/tiny-random-t5' __UpperCamelCase =AutoTokenizer.from_pretrained(A_ ) __UpperCamelCase =AutoModelForSeqaSeqLM.from_pretrained(A_ ) __UpperCamelCase =tokenizer('This is me' , return_tensors='pt' ) __UpperCamelCase =model.to_bettertransformer() self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) __UpperCamelCase =model.generate(**A_ ) __UpperCamelCase =model.reverse_bettertransformer() self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(A_ ) __UpperCamelCase =AutoModelForSeqaSeqLM.from_pretrained(A_ ) self.assertFalse( any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) __UpperCamelCase =model_reloaded.generate(**A_ ) self.assertTrue(torch.allclose(A_ , A_ ) ) def _a ( self ) -> str: __UpperCamelCase ='hf-internal-testing/tiny-random-t5' __UpperCamelCase =AutoModelForSeqaSeqLM.from_pretrained(A_ ) __UpperCamelCase =model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(A_ ): model.save_pretrained(A_ ) __UpperCamelCase =model.reverse_bettertransformer() model.save_pretrained(A_ )
62
import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase_ ( unittest.TestCase ): def __init__( self , __UpperCamelCase , __UpperCamelCase=3 , __UpperCamelCase=3_2 , __UpperCamelCase=3 , __UpperCamelCase=1_0 , __UpperCamelCase=[1_0, 2_0, 3_0, 4_0] , __UpperCamelCase=[1, 1, 2, 1] , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=3 , __UpperCamelCase=None , ): """simple docstring""" UpperCamelCase_ = parent UpperCamelCase_ = batch_size UpperCamelCase_ = image_size UpperCamelCase_ = num_channels UpperCamelCase_ = embeddings_size UpperCamelCase_ = hidden_sizes UpperCamelCase_ = depths UpperCamelCase_ = is_training UpperCamelCase_ = use_labels UpperCamelCase_ = hidden_act UpperCamelCase_ = num_labels UpperCamelCase_ = scope UpperCamelCase_ = len(__UpperCamelCase ) def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase_ = self.get_config() return config, pixel_values def lowerCamelCase_ ( self ): """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" UpperCamelCase_ = FlaxRegNetModel(config=__UpperCamelCase ) UpperCamelCase_ = model(__UpperCamelCase ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" UpperCamelCase_ = self.num_labels UpperCamelCase_ = FlaxRegNetForImageClassification(config=__UpperCamelCase ) UpperCamelCase_ = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = self.prepare_config_and_inputs() UpperCamelCase_ , UpperCamelCase_ = config_and_inputs UpperCamelCase_ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class lowercase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): A__ : Tuple = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () A__ : Any = False A__ : List[Any] = False A__ : Dict = False def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = FlaxRegNetModelTester(self ) UpperCamelCase_ = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase ) def lowerCamelCase_ ( self ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self ): """simple docstring""" return def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) @unittest.skip(reason="""RegNet does not use inputs_embeds""" ) def lowerCamelCase_ ( self ): """simple docstring""" pass @unittest.skip(reason="""RegNet does not support input and output embeddings""" ) def lowerCamelCase_ ( self ): """simple docstring""" pass def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ = model_class(__UpperCamelCase ) UpperCamelCase_ = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase_ = [*signature.parameters.keys()] UpperCamelCase_ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def lowerCamelCase_ ( self ): """simple docstring""" def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): UpperCamelCase_ = model_class(__UpperCamelCase ) UpperCamelCase_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) UpperCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCamelCase_ = self.model_tester.num_stages self.assertEqual(len(__UpperCamelCase ) , expected_num_stages + 1 ) UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ = True check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase_ = True check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCamelCase_ = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) UpperCamelCase_ = model_class(__UpperCamelCase ) @jax.jit def model_jitted(__UpperCamelCase , **__UpperCamelCase ): return model(pixel_values=__UpperCamelCase , **__UpperCamelCase ) with self.subTest("""JIT Enabled""" ): UpperCamelCase_ = model_jitted(**__UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): UpperCamelCase_ = model_jitted(**__UpperCamelCase ).to_tuple() self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) ) for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( ) -> Tuple: UpperCamelCase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_flax class lowercase_ ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self ): """simple docstring""" return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None @slow def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" ) UpperCamelCase_ = self.default_image_processor UpperCamelCase_ = prepare_img() UpperCamelCase_ = image_processor(images=__UpperCamelCase , return_tensors="""np""" ) UpperCamelCase_ = model(**__UpperCamelCase ) # verify the logits UpperCamelCase_ = (1, 1_0_0_0) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) UpperCamelCase_ = jnp.array([-0.4_180, -1.5_051, -3.4_836] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
122
0
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : str = "Salesforce/blip-image-captioning-base" __UpperCAmelCase : Any = ( "This is a tool that generates a description of an image. It takes an input named `image` which should be the " "image to caption, and returns a text that contains the description in English." ) __UpperCAmelCase : Any = "image_captioner" __UpperCAmelCase : Optional[Any] = AutoModelForVisionaSeq __UpperCAmelCase : Tuple = ["image"] __UpperCAmelCase : int = ["text"] def __init__( self : Tuple, *UpperCAmelCase__ : Optional[int], **UpperCAmelCase__ : Dict ): requires_backends(self, ["vision"] ) super().__init__(*UpperCAmelCase__, **UpperCAmelCase__ ) def _lowercase ( self : str, UpperCAmelCase__ : "Image" ): return self.pre_processor(images=UpperCAmelCase__, return_tensors="pt" ) def _lowercase ( self : str, UpperCAmelCase__ : Optional[int] ): return self.model.generate(**UpperCAmelCase__ ) def _lowercase ( self : Dict, UpperCAmelCase__ : List[Any] ): return self.pre_processor.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__ )[0].strip()
144
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _a = logging.get_logger(__name__) _a = { 'caidas/swin2sr-classicalsr-x2-64': ( 'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json' ), } class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : List[Any] = "swin2sr" __UpperCAmelCase : List[Any] = { "hidden_size": "embed_dim", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : Any, UpperCAmelCase__ : Dict=6_4, UpperCAmelCase__ : List[Any]=1, UpperCAmelCase__ : Dict=3, UpperCAmelCase__ : Optional[Any]=1_8_0, UpperCAmelCase__ : Any=[6, 6, 6, 6, 6, 6], UpperCAmelCase__ : Dict=[6, 6, 6, 6, 6, 6], UpperCAmelCase__ : Tuple=8, UpperCAmelCase__ : Optional[int]=2.0, UpperCAmelCase__ : List[str]=True, UpperCAmelCase__ : Tuple=0.0, UpperCAmelCase__ : Optional[Any]=0.0, UpperCAmelCase__ : List[str]=0.1, UpperCAmelCase__ : Dict="gelu", UpperCAmelCase__ : Dict=False, UpperCAmelCase__ : Dict=0.02, UpperCAmelCase__ : Tuple=1E-5, UpperCAmelCase__ : str=2, UpperCAmelCase__ : str=1.0, UpperCAmelCase__ : Optional[int]="1conv", UpperCAmelCase__ : Dict="pixelshuffle", **UpperCAmelCase__ : List[Any], ): super().__init__(**UpperCAmelCase__ ) __lowercase = image_size __lowercase = patch_size __lowercase = num_channels __lowercase = embed_dim __lowercase = depths __lowercase = len(UpperCAmelCase__ ) __lowercase = num_heads __lowercase = window_size __lowercase = mlp_ratio __lowercase = qkv_bias __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = drop_path_rate __lowercase = hidden_act __lowercase = use_absolute_embeddings __lowercase = layer_norm_eps __lowercase = initializer_range __lowercase = upscale __lowercase = img_range __lowercase = resi_connection __lowercase = upsampler
144
1
"""simple docstring""" import re import string import numpy as np import datasets lowercase__ = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n' lowercase__ = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n' lowercase__ = '\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): '''simple docstring''' def A_ ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , reference_urls=[] , ) def A_ ( self , lowercase , lowercase , lowercase=None , lowercase=False , lowercase=False , lowercase=False , ): if regexes_to_ignore is not None: for s in regexes_to_ignore: _lowerCamelCase : Dict = np.array([re.sub(UpperCamelCase__ , '' , UpperCamelCase__ ) for x in predictions] ) _lowerCamelCase : Any = np.array([re.sub(UpperCamelCase__ , '' , UpperCamelCase__ ) for x in references] ) else: _lowerCamelCase : List[Any] = np.asarray(UpperCamelCase__ ) _lowerCamelCase : Union[str, Any] = np.asarray(UpperCamelCase__ ) if ignore_case: _lowerCamelCase : str = np.char.lower(UpperCamelCase__ ) _lowerCamelCase : Optional[Any] = np.char.lower(UpperCamelCase__ ) if ignore_punctuation: _lowerCamelCase : Union[str, Any] = string.punctuation.maketrans('' , '' , string.punctuation ) _lowerCamelCase : List[str] = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ ) _lowerCamelCase : Any = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ ) if ignore_numbers: _lowerCamelCase : Union[str, Any] = string.digits.maketrans('' , '' , string.digits ) _lowerCamelCase : Optional[int] = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ ) _lowerCamelCase : str = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ ) _lowerCamelCase : Optional[int] = predictions == references return {"exact_match": np.mean(UpperCamelCase__ ) * 100}
96
from math import sqrt def A ( _SCREAMING_SNAKE_CASE = 100_0000 ) -> int: lowerCamelCase : int = 0 lowerCamelCase : int = 0 lowerCamelCase : int while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(_SCREAMING_SNAKE_CASE ,sum_shortest_sides // 2 ) - max(1 ,sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(f'''{solution() = }''')
48
0
from __future__ import annotations def a__ ( UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : str , ) -> int: UpperCAmelCase : Optional[Any] = len(UpperCAmelCase ) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(UpperCAmelCase ): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , UpperCAmelCase , UpperCAmelCase , ) def a__ ( UpperCAmelCase : Dict ) -> Optional[Any]: UpperCAmelCase : Any = [] depth_first_search([] , [] , [] , UpperCAmelCase , UpperCAmelCase ) # Print all the boards for board in boards: for column in board: print(UpperCAmelCase ) print('''''' ) print(len(UpperCAmelCase ) , '''solutions were found.''' ) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
355
from __future__ import annotations import queue class __UpperCAmelCase : def __init__( self : str, __A : Union[str, Any] ): UpperCAmelCase : Dict = data UpperCAmelCase : Tuple = None UpperCAmelCase : Any = None def a__ ( ) -> TreeNode: print('''\n********Press N to stop entering at any point of time********\n''' ) UpperCAmelCase : Any = input('''Enter the value of the root node: ''' ).strip().lower() UpperCAmelCase : queue.Queue = queue.Queue() UpperCAmelCase : Tuple = TreeNode(int(UpperCAmelCase ) ) q.put(UpperCAmelCase ) while not q.empty(): UpperCAmelCase : int = q.get() UpperCAmelCase : Union[str, Any] = f'''Enter the left node of {node_found.data}: ''' UpperCAmelCase : List[Any] = input(UpperCAmelCase ).strip().lower() or '''n''' if check == "n": return tree_node UpperCAmelCase : List[str] = TreeNode(int(UpperCAmelCase ) ) UpperCAmelCase : List[str] = left_node q.put(UpperCAmelCase ) UpperCAmelCase : List[Any] = f'''Enter the right node of {node_found.data}: ''' UpperCAmelCase : List[Any] = input(UpperCAmelCase ).strip().lower() or '''n''' if check == "n": return tree_node UpperCAmelCase : Dict = TreeNode(int(UpperCAmelCase ) ) UpperCAmelCase : Dict = right_node q.put(UpperCAmelCase ) raise def a__ ( UpperCAmelCase : TreeNode ) -> None: if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not node: return print(node.data , end=''',''' ) pre_order(node.left ) pre_order(node.right ) def a__ ( UpperCAmelCase : TreeNode ) -> None: if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not node: return in_order(node.left ) print(node.data , end=''',''' ) in_order(node.right ) def a__ ( UpperCAmelCase : TreeNode ) -> None: if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=''',''' ) def a__ ( UpperCAmelCase : TreeNode ) -> None: if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not node: return UpperCAmelCase : queue.Queue = queue.Queue() q.put(UpperCAmelCase ) while not q.empty(): UpperCAmelCase : List[Any] = q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def a__ ( UpperCAmelCase : TreeNode ) -> None: if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not node: return UpperCAmelCase : queue.Queue = queue.Queue() q.put(UpperCAmelCase ) while not q.empty(): UpperCAmelCase : int = [] while not q.empty(): UpperCAmelCase : List[str] = q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(UpperCAmelCase ) def a__ ( UpperCAmelCase : TreeNode ) -> None: if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not node: return UpperCAmelCase : list[TreeNode] = [] UpperCAmelCase : List[str] = node while n or stack: while n: # start from root node, find its left child print(n.data , end=''',''' ) stack.append(UpperCAmelCase ) UpperCAmelCase : Dict = n.left # end of while means current node doesn't have left child UpperCAmelCase : Union[str, Any] = stack.pop() # start to traverse its right child UpperCAmelCase : List[str] = n.right def a__ ( UpperCAmelCase : TreeNode ) -> None: if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not node: return UpperCAmelCase : list[TreeNode] = [] UpperCAmelCase : Any = node while n or stack: while n: stack.append(UpperCAmelCase ) UpperCAmelCase : Dict = n.left UpperCAmelCase : Optional[int] = stack.pop() print(n.data , end=''',''' ) UpperCAmelCase : Any = n.right def a__ ( UpperCAmelCase : TreeNode ) -> None: if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not node: return UpperCAmelCase , UpperCAmelCase : Dict = [], [] UpperCAmelCase : Any = node stacka.append(UpperCAmelCase ) while stacka: # to find the reversed order of post order, store it in stack2 UpperCAmelCase : Union[str, Any] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(UpperCAmelCase ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=''',''' ) def a__ ( UpperCAmelCase : str = "" , UpperCAmelCase : int=50 , UpperCAmelCase : Union[str, Any]="*" ) -> str: if not s: return "\n" + width * char UpperCAmelCase , UpperCAmelCase : int = divmod(width - len(UpperCAmelCase ) - 2 , 2 ) return f'''{left * char} {s} {(left + extra) * char}''' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("Binary Tree Traversals")) _lowerCamelCase : TreeNode = build_tree() print(prompt("Pre Order Traversal")) pre_order(node) print(prompt() + "\n") print(prompt("In Order Traversal")) in_order(node) print(prompt() + "\n") print(prompt("Post Order Traversal")) post_order(node) print(prompt() + "\n") print(prompt("Level Order Traversal")) level_order(node) print(prompt() + "\n") print(prompt("Actual Level Order Traversal")) level_order_actual(node) print("*" * 5_0 + "\n") print(prompt("Pre Order Traversal - Iteration Version")) pre_order_iter(node) print(prompt() + "\n") print(prompt("In Order Traversal - Iteration Version")) in_order_iter(node) print(prompt() + "\n") print(prompt("Post Order Traversal - Iteration Version")) post_order_iter(node) print(prompt())
99
0
'''simple docstring''' import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def _lowerCAmelCase ( __snake_case : Features ) -> Optional[int]: __A : Union[str, Any] = np.inf def set_batch_size(__snake_case : FeatureType ) -> None: nonlocal batch_size if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): __A : str = min(SCREAMING_SNAKE_CASE__ , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): __A : int = min(SCREAMING_SNAKE_CASE__ , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and feature.dtype == "binary": __A : Tuple = min(SCREAMING_SNAKE_CASE__ , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return None if batch_size is np.inf else batch_size class SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE_ ): def __init__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = None , **_UpperCAmelCase , ): '''simple docstring''' super().__init__( _UpperCAmelCase , split=_UpperCAmelCase , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase , streaming=_UpperCAmelCase , num_proc=_UpperCAmelCase , **_UpperCAmelCase , ) __A : Tuple = path_or_paths if isinstance(_UpperCAmelCase , _UpperCAmelCase) else {self.split: path_or_paths} __A : Optional[int] = _PACKAGED_DATASETS_MODULES["""parquet"""][1] __A : Union[str, Any] = Parquet( cache_dir=_UpperCAmelCase , data_files=_UpperCAmelCase , features=_UpperCAmelCase , hash=_UpperCAmelCase , **_UpperCAmelCase , ) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' if self.streaming: __A : Any = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: __A : List[Any] = None __A : List[str] = None __A : Optional[int] = None __A : Optional[int] = None self.builder.download_and_prepare( download_config=_UpperCAmelCase , download_mode=_UpperCAmelCase , verification_mode=_UpperCAmelCase , base_path=_UpperCAmelCase , num_proc=self.num_proc , ) __A : Optional[int] = self.builder.as_dataset( split=self.split , verification_mode=_UpperCAmelCase , in_memory=self.keep_in_memory) return dataset class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ): '''simple docstring''' __A : Union[str, Any] = dataset __A : List[Any] = path_or_buf __A : List[Any] = batch_size or get_writer_batch_size(dataset.features) __A : List[str] = parquet_writer_kwargs def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Union[str, Any] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike)): with open(self.path_or_buf , 'wb+') as buffer: __A : int = self._write(file_obj=_UpperCAmelCase , batch_size=_UpperCAmelCase , **self.parquet_writer_kwargs) else: __A : List[Any] = self._write(file_obj=self.path_or_buf , batch_size=_UpperCAmelCase , **self.parquet_writer_kwargs) return written def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase): '''simple docstring''' __A : Tuple = 0 __A : Union[str, Any] = parquet_writer_kwargs.pop('path_or_buf' , _UpperCAmelCase) __A : Tuple = self.dataset.features.arrow_schema __A : Dict = pq.ParquetWriter(_UpperCAmelCase , schema=_UpperCAmelCase , **_UpperCAmelCase) for offset in logging.tqdm( range(0 , len(self.dataset) , _UpperCAmelCase) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ): __A : Union[str, Any] = query_table( table=self.dataset._data , key=slice(_UpperCAmelCase , offset + batch_size) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(_UpperCAmelCase) written += batch.nbytes writer.close() return written
190
import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class snake_case ( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def __init__( self : Optional[int] , lowerCAmelCase : NestedDataStructureLike[PathLike] , lowerCAmelCase : Optional[NamedSplit] = None , lowerCAmelCase : Optional[Features] = None , lowerCAmelCase : str = None , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[str] = None , lowerCAmelCase : Optional[int] = None , **lowerCAmelCase : Optional[Any] , ) -> int: """simple docstring""" super().__init__( lowerCAmelCase , split=lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase , streaming=lowerCAmelCase , num_proc=lowerCAmelCase , **lowerCAmelCase , ) _snake_case : Tuple = field _snake_case : str = path_or_paths if isinstance(lowerCAmelCase , lowerCAmelCase) else {self.split: path_or_paths} _snake_case : int = Json( cache_dir=lowerCAmelCase , data_files=lowerCAmelCase , features=lowerCAmelCase , field=lowerCAmelCase , **lowerCAmelCase , ) def UpperCamelCase_ ( self : Any) -> Tuple: """simple docstring""" if self.streaming: _snake_case : int = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: _snake_case : Dict = None _snake_case : Optional[int] = None _snake_case : Optional[Any] = None _snake_case : str = None self.builder.download_and_prepare( download_config=lowerCAmelCase , download_mode=lowerCAmelCase , verification_mode=lowerCAmelCase , base_path=lowerCAmelCase , num_proc=self.num_proc , ) _snake_case : List[str] = self.builder.as_dataset( split=self.split , verification_mode=lowerCAmelCase , in_memory=self.keep_in_memory) return dataset class snake_case : '''simple docstring''' def __init__( self : Union[str, Any] , lowerCAmelCase : Dataset , lowerCAmelCase : Union[PathLike, BinaryIO] , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Optional[int] = None , **lowerCAmelCase : Any , ) -> Optional[int]: """simple docstring""" if num_proc is not None and num_proc <= 0: raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''') _snake_case : Optional[Any] = dataset _snake_case : str = path_or_buf _snake_case : Optional[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _snake_case : Tuple = num_proc _snake_case : Dict = """utf-8""" _snake_case : str = to_json_kwargs def UpperCamelCase_ ( self : Optional[Any]) -> int: """simple docstring""" _snake_case : Optional[Any] = self.to_json_kwargs.pop("""path_or_buf""" , lowerCAmelCase) _snake_case : Any = self.to_json_kwargs.pop("""orient""" , """records""") _snake_case : List[str] = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False) _snake_case : List[Any] = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True) _snake_case : Union[str, Any] = self.to_json_kwargs.pop("""compression""" , lowerCAmelCase) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F'''`datasets` currently does not support {compression} compression''') if isinstance(self.path_or_buf , (str, bytes, os.PathLike)): with fsspec.open(self.path_or_buf , """wb""" , compression=lowerCAmelCase) as buffer: _snake_case : List[str] = self._write(file_obj=lowerCAmelCase , orient=lowerCAmelCase , lines=lowerCAmelCase , index=lowerCAmelCase , **self.to_json_kwargs) else: if compression: raise NotImplementedError( F'''The compression parameter is not supported when writing to a buffer, but compression={compression}''' """ was passed. Please provide a local path instead.""") _snake_case : Tuple = self._write( file_obj=self.path_or_buf , orient=lowerCAmelCase , lines=lowerCAmelCase , index=lowerCAmelCase , **self.to_json_kwargs) return written def UpperCamelCase_ ( self : Tuple , lowerCAmelCase : Optional[int]) -> Optional[Any]: """simple docstring""" _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : int = args _snake_case : int = query_table( table=self.dataset.data , key=slice(lowerCAmelCase , offset + self.batch_size) , indices=self.dataset._indices , ) _snake_case : Optional[Any] = batch.to_pandas().to_json( path_or_buf=lowerCAmelCase , orient=lowerCAmelCase , lines=lowerCAmelCase , index=lowerCAmelCase , **lowerCAmelCase) if not json_str.endswith("""\n"""): json_str += "\n" return json_str.encode(self.encoding) def UpperCamelCase_ ( self : Union[str, Any] , lowerCAmelCase : BinaryIO , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , **lowerCAmelCase : List[Any] , ) -> int: """simple docstring""" _snake_case : Optional[int] = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset) , self.batch_size) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ): _snake_case : Tuple = self._batch_json((offset, orient, lines, index, to_json_kwargs)) written += file_obj.write(lowerCAmelCase) else: _snake_case , _snake_case : str = len(self.dataset), self.batch_size with multiprocessing.Pool(self.num_proc) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowerCAmelCase , lowerCAmelCase)] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ): written += file_obj.write(lowerCAmelCase) return written
317
0
"""simple docstring""" from __future__ import annotations import numpy as np def snake_case ( A__ ): return np.maximum(0 ,A__ ) if __name__ == "__main__": print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
253
"""simple docstring""" from math import factorial def snake_case ( A__ = 1_00 ): return sum(int(A__ ) for x in str(factorial(A__ ) ) ) if __name__ == "__main__": print(solution(int(input('''Enter the Number: ''').strip())))
253
1
'''simple docstring''' import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case , snake_case=1_3 , snake_case=6_4 , snake_case=2 , snake_case=3 , snake_case=True , snake_case=True , snake_case=3_2 , snake_case=5 , snake_case=4 , snake_case=3_7 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=1_0 , snake_case=0.02 , snake_case=[1, 1_6, 4, 4] , snake_case=None , ): '''simple docstring''' UpperCAmelCase : Tuple = parent UpperCAmelCase : Union[str, Any] = batch_size UpperCAmelCase : int = image_size UpperCAmelCase : Dict = patch_size UpperCAmelCase : Dict = num_channels UpperCAmelCase : str = is_training UpperCAmelCase : Optional[Any] = use_labels UpperCAmelCase : Optional[Any] = hidden_size UpperCAmelCase : List[str] = num_hidden_layers UpperCAmelCase : List[str] = num_attention_heads UpperCAmelCase : List[Any] = intermediate_size UpperCAmelCase : Optional[Any] = hidden_act UpperCAmelCase : Tuple = hidden_dropout_prob UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase : Optional[Any] = type_sequence_label_size UpperCAmelCase : Tuple = initializer_range UpperCAmelCase : Optional[int] = scope UpperCAmelCase : Dict = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size UpperCAmelCase : Tuple = (self.image_size // 3_2) ** 2 UpperCAmelCase : List[str] = num_patches + 1 def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : Optional[int] = None if self.use_labels: UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Dict = self.get_config() return config, pixel_values, labels def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, "hidden_sizes": [4, 8, 1_6, 3_2], "num_groups": 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=snake_case , ) def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : str = ViTHybridModel(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Dict = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[Any] = self.type_sequence_label_size UpperCAmelCase : Tuple = ViTHybridForImageClassification(snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : List[str] = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = config_and_inputs UpperCAmelCase : Tuple = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () SCREAMING_SNAKE_CASE__ : int = ( {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : Dict = False SCREAMING_SNAKE_CASE__ : Dict = False SCREAMING_SNAKE_CASE__ : Optional[int] = False def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = ViTHybridModelTester(self ) UpperCAmelCase : int = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=3_7 ) def A_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds" ) def A_ ( self ): '''simple docstring''' pass def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : str = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(snake_case ) UpperCAmelCase : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()] UpperCAmelCase : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Optional[Any] = _config_zero_init(snake_case ) for model_class in self.all_model_classes: UpperCAmelCase : Optional[int] = model_class(config=snake_case ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": UpperCAmelCase : List[str] = [f"{name}.{key}" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , ) @slow def A_ ( self ): '''simple docstring''' for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Optional[Any] = ViTHybridModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def lowercase ( ): '''simple docstring''' UpperCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def A_ ( self ): '''simple docstring''' return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( snake_case ) UpperCAmelCase : List[Any] = self.default_image_processor UpperCAmelCase : Any = prepare_img() UpperCAmelCase : str = image_processor(images=snake_case , return_tensors="pt" ).to(snake_case ) # forward pass with torch.no_grad(): UpperCAmelCase : Optional[Any] = model(**snake_case ) # verify the logits UpperCAmelCase : Dict = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , snake_case ) UpperCAmelCase : Union[str, Any] = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1e-4 ) ) @slow @require_accelerate def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" ) UpperCAmelCase : Optional[Any] = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" ) UpperCAmelCase : int = prepare_img() UpperCAmelCase : Dict = image_processor(images=snake_case , return_tensors="pt" ) UpperCAmelCase : Tuple = model(**snake_case ) UpperCAmelCase : Dict = outputs.logits # model predicts one of the 1000 ImageNet classes UpperCAmelCase : Optional[Any] = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx] , "tabby, tabby cat" )
311
'''simple docstring''' # Lint as: python3 import itertools import os import re a : Tuple = re.compile(R"([A-Z]+)([A-Z][a-z])") a : Union[str, Any] = re.compile(R"([a-z\d])([A-Z])") a : str = re.compile(R"(?<!_)_(?!_)") a : List[Any] = re.compile(R"(_{2,})") a : List[Any] = R"^\w+(\.\w+)*$" a : Dict = R"<>:/\|?*" def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = _uppercase_uppercase_re.sub(R"\1_\2" , __magic_name__ ) UpperCAmelCase : List[str] = _lowercase_uppercase_re.sub(R"\1_\2" , __magic_name__ ) return name.lower() def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = _single_underscore_re.split(__magic_name__ ) UpperCAmelCase : Union[str, Any] = [_multiple_underscores_re.split(__magic_name__ ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(__magic_name__ ) if n != "" ) def lowercase ( __magic_name__ ): '''simple docstring''' if os.path.basename(__magic_name__ ) != name: raise ValueError(F"Should be a dataset name, not a path: {name}" ) return camelcase_to_snakecase(__magic_name__ ) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if os.path.basename(__magic_name__ ) != name: raise ValueError(F"Should be a dataset name, not a path: {name}" ) if not re.match(_split_re , __magic_name__ ): raise ValueError(F"Split name should match '{_split_re}'' but got '{split}'." ) return F"{filename_prefix_for_name(__magic_name__ )}-{split}" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ): '''simple docstring''' UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ ) if filetype_suffix: prefix += F".{filetype_suffix}" UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ ) return F"{filepath}*" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None ): '''simple docstring''' UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ ) UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ ) if shard_lengths: UpperCAmelCase : Tuple = len(__magic_name__ ) UpperCAmelCase : Optional[int] = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(__magic_name__ )] if filetype_suffix: UpperCAmelCase : Optional[int] = [filename + F".{filetype_suffix}" for filename in filenames] return filenames else: UpperCAmelCase : int = prefix if filetype_suffix: filename += F".{filetype_suffix}" return [filename]
311
1
'''simple docstring''' from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __snake_case( _lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = ["image_processor", "tokenizer"] UpperCAmelCase : Tuple = "BlipImageProcessor" UpperCAmelCase : Union[str, Any] = ("BertTokenizer", "BertTokenizerFast") def __init__( self , A_ , A_ ) -> Dict: lowerCAmelCase = False super().__init__(A_ , A_ ) lowerCAmelCase = self.image_processor def __call__( self , A_ = None , A_ = None , A_ = True , A_ = False , A_ = None , A_ = None , A_ = 0 , A_ = None , A_ = None , A_ = False , A_ = False , A_ = False , A_ = False , A_ = False , A_ = True , A_ = None , **A_ , ) -> BatchEncoding: if images is None and text is None: raise ValueError("""You have to specify either images or text.""" ) # Get only text if images is None: lowerCAmelCase = self.tokenizer lowerCAmelCase = self.tokenizer( text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , ) return text_encoding # add pixel_values lowerCAmelCase = self.image_processor(A_ , return_tensors=A_ ) if text is not None: lowerCAmelCase = self.tokenizer( text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , ) else: lowerCAmelCase = None if text_encoding is not None: encoding_image_processor.update(A_ ) return encoding_image_processor def __snake_case ( self , *A_ , **A_ ) -> Union[str, Any]: return self.tokenizer.batch_decode(*A_ , **A_ ) def __snake_case ( self , *A_ , **A_ ) -> Tuple: return self.tokenizer.decode(*A_ , **A_ ) @property def __snake_case ( self ) -> str: lowerCAmelCase = self.tokenizer.model_input_names lowerCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
187
'''simple docstring''' import numpy class __snake_case: '''simple docstring''' def __init__( self , A_ , A_ ) -> None: lowerCAmelCase = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. lowerCAmelCase = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. lowerCAmelCase = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. lowerCAmelCase = numpy.random.rand(3 , 1 ) # Real output values provided. lowerCAmelCase = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. lowerCAmelCase = numpy.zeros(output_array.shape ) def __snake_case ( self ) -> numpy.ndarray: lowerCAmelCase = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. lowerCAmelCase = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. lowerCAmelCase = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def __snake_case ( self ) -> None: lowerCAmelCase = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) lowerCAmelCase = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) lowerCAmelCase = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def __snake_case ( self , A_ , A_ , A_ ) -> None: for iteration in range(1 , iterations + 1 ): lowerCAmelCase = self.feedforward() self.back_propagation() if give_loss: lowerCAmelCase = numpy.mean(numpy.square(output - self.feedforward() ) ) print(f'Iteration {iteration} Loss: {loss}' ) def __snake_case ( self , A_ ) -> int: lowerCAmelCase = input_arr lowerCAmelCase = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) lowerCAmelCase = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) lowerCAmelCase = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def _snake_case ( _SCREAMING_SNAKE_CASE : numpy.ndarray ) -> numpy.ndarray: """simple docstring""" return 1 / (1 + numpy.exp(-value )) def _snake_case ( _SCREAMING_SNAKE_CASE : numpy.ndarray ) -> numpy.ndarray: """simple docstring""" return (value) * (1 - (value)) def _snake_case ( ) -> int: """simple docstring""" lowerCAmelCase = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. lowerCAmelCase = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. lowerCAmelCase = TwoHiddenLayerNeuralNetwork( input_array=_SCREAMING_SNAKE_CASE , output_array=_SCREAMING_SNAKE_CASE ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=_SCREAMING_SNAKE_CASE , iterations=10 , give_loss=_SCREAMING_SNAKE_CASE ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
187
1
"""simple docstring""" import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class lowercase__ ( unittest.TestCase ): def UpperCAmelCase__ ( self : Optional[int] ): lowerCamelCase_ : str ="| <pad> <unk> <s> </s> a b c d e f g h i j k".split() lowerCamelCase_ : Optional[int] =dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) lowerCamelCase_ : Any ={ "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", } lowerCamelCase_ : Any ={ "feature_size": 1, "padding_value": 0.0, "sampling_rate": 1_6000, "return_attention_mask": False, "do_normalize": True, } lowerCamelCase_ : str =tempfile.mkdtemp() lowerCamelCase_ : Optional[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowerCamelCase_ : Optional[Any] =os.path.join(self.tmpdirname , snake_case__ ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case__ ) + "\n" ) with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case__ ) + "\n" ) # load decoder from hub lowerCamelCase_ : Dict ="hf-internal-testing/ngram-beam-search-decoder" def UpperCAmelCase__ ( self : Tuple , **snake_case__ : str ): lowerCamelCase_ : Optional[Any] =self.add_kwargs_tokens_map.copy() kwargs.update(snake_case__ ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def UpperCAmelCase__ ( self : List[str] , **snake_case__ : Dict ): return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **snake_case__ ) def UpperCAmelCase__ ( self : Tuple , **snake_case__ : Any ): return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **snake_case__ ) def UpperCAmelCase__ ( self : Tuple ): shutil.rmtree(self.tmpdirname ) def UpperCAmelCase__ ( self : Dict ): lowerCamelCase_ : List[str] =self.get_tokenizer() lowerCamelCase_ : str =self.get_feature_extractor() lowerCamelCase_ : List[Any] =self.get_decoder() lowerCamelCase_ : str =WavaVecaProcessorWithLM(tokenizer=snake_case__ , feature_extractor=snake_case__ , decoder=snake_case__ ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase_ : str =WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case__ ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , snake_case__ ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , snake_case__ ) def UpperCAmelCase__ ( self : int ): lowerCamelCase_ : Union[str, Any] =WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match lowerCamelCase_ : Dict =WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def UpperCAmelCase__ ( self : Union[str, Any] ): lowerCamelCase_ : List[Any] =self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(["xx"] ) with self.assertRaisesRegex(snake_case__ , "include" ): WavaVecaProcessorWithLM( tokenizer=snake_case__ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def UpperCAmelCase__ ( self : int ): lowerCamelCase_ : Optional[int] =self.get_feature_extractor() lowerCamelCase_ : Any =self.get_tokenizer() lowerCamelCase_ : Optional[int] =self.get_decoder() lowerCamelCase_ : int =WavaVecaProcessorWithLM(tokenizer=snake_case__ , feature_extractor=snake_case__ , decoder=snake_case__ ) lowerCamelCase_ : str =floats_list((3, 1000) ) lowerCamelCase_ : Optional[Any] =feature_extractor(snake_case__ , return_tensors="np" ) lowerCamelCase_ : str =processor(snake_case__ , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def UpperCAmelCase__ ( self : str ): lowerCamelCase_ : Any =self.get_feature_extractor() lowerCamelCase_ : Optional[int] =self.get_tokenizer() lowerCamelCase_ : int =self.get_decoder() lowerCamelCase_ : Optional[int] =WavaVecaProcessorWithLM(tokenizer=snake_case__ , feature_extractor=snake_case__ , decoder=snake_case__ ) lowerCamelCase_ : List[Any] ="This is a test string" lowerCamelCase_ : List[Any] =processor(text=snake_case__ ) lowerCamelCase_ : Optional[Any] =tokenizer(snake_case__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCAmelCase__ ( self : List[str] , snake_case__ : str=(2, 10, 16) , snake_case__ : str=77 ): np.random.seed(snake_case__ ) return np.random.rand(*snake_case__ ) def UpperCAmelCase__ ( self : str ): lowerCamelCase_ : Optional[int] =self.get_feature_extractor() lowerCamelCase_ : str =self.get_tokenizer() lowerCamelCase_ : Optional[int] =self.get_decoder() lowerCamelCase_ : Optional[Any] =WavaVecaProcessorWithLM(tokenizer=snake_case__ , feature_extractor=snake_case__ , decoder=snake_case__ ) lowerCamelCase_ : Tuple =self._get_dummy_logits(shape=(10, 16) , seed=13 ) lowerCamelCase_ : int =processor.decode(snake_case__ ) lowerCamelCase_ : Union[str, Any] =decoder.decode_beams(snake_case__ )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual("</s> <s> </s>" , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ["fork"], ["spawn"]] ) def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Optional[int] ): lowerCamelCase_ : Union[str, Any] =self.get_feature_extractor() lowerCamelCase_ : Dict =self.get_tokenizer() lowerCamelCase_ : Optional[int] =self.get_decoder() lowerCamelCase_ : Any =WavaVecaProcessorWithLM(tokenizer=snake_case__ , feature_extractor=snake_case__ , decoder=snake_case__ ) lowerCamelCase_ : List[str] =self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: lowerCamelCase_ : Any =processor.batch_decode(snake_case__ ) else: with get_context(snake_case__ ).Pool() as pool: lowerCamelCase_ : int =processor.batch_decode(snake_case__ , snake_case__ ) lowerCamelCase_ : Optional[int] =list(snake_case__ ) with get_context("fork" ).Pool() as p: lowerCamelCase_ : int =decoder.decode_beams_batch(snake_case__ , snake_case__ ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] =[], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(snake_case__ , decoded_processor.text ) self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text ) self.assertListEqual(snake_case__ , decoded_processor.logit_score ) self.assertListEqual(snake_case__ , decoded_processor.lm_score ) def UpperCAmelCase__ ( self : Optional[Any] ): lowerCamelCase_ : List[Any] =self.get_feature_extractor() lowerCamelCase_ : Dict =self.get_tokenizer() lowerCamelCase_ : Optional[Any] =self.get_decoder() lowerCamelCase_ : Tuple =WavaVecaProcessorWithLM(tokenizer=snake_case__ , feature_extractor=snake_case__ , decoder=snake_case__ ) lowerCamelCase_ : Optional[Any] =self._get_dummy_logits() lowerCamelCase_ : Union[str, Any] =15 lowerCamelCase_ : str =-20.0 lowerCamelCase_ : Tuple =-4.0 lowerCamelCase_ : Optional[Any] =processor.batch_decode( snake_case__ , beam_width=snake_case__ , beam_prune_logp=snake_case__ , token_min_logp=snake_case__ , ) lowerCamelCase_ : Tuple =decoded_processor_out.text lowerCamelCase_ : List[str] =list(snake_case__ ) with get_context("fork" ).Pool() as pool: lowerCamelCase_ : str =decoder.decode_beams_batch( snake_case__ , snake_case__ , beam_width=snake_case__ , beam_prune_logp=snake_case__ , token_min_logp=snake_case__ , ) lowerCamelCase_ : str =[d[0][0] for d in decoded_decoder_out] lowerCamelCase_ : str =[d[0][2] for d in decoded_decoder_out] lowerCamelCase_ : Union[str, Any] =[d[0][3] for d in decoded_decoder_out] self.assertListEqual(snake_case__ , snake_case__ ) self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , snake_case__ ) self.assertTrue(np.array_equal(snake_case__ , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.054, -18.447] , snake_case__ , atol=1E-3 ) ) self.assertTrue(np.array_equal(snake_case__ , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.554, -13.9_474] , snake_case__ , atol=1E-3 ) ) def UpperCAmelCase__ ( self : str ): lowerCamelCase_ : Union[str, Any] =self.get_feature_extractor() lowerCamelCase_ : Any =self.get_tokenizer() lowerCamelCase_ : Optional[Any] =self.get_decoder() lowerCamelCase_ : Optional[int] =WavaVecaProcessorWithLM(tokenizer=snake_case__ , feature_extractor=snake_case__ , decoder=snake_case__ ) lowerCamelCase_ : List[Any] =self._get_dummy_logits() lowerCamelCase_ : Tuple =2.0 lowerCamelCase_ : int =5.0 lowerCamelCase_ : Dict =-20.0 lowerCamelCase_ : str =True lowerCamelCase_ : Optional[Any] =processor.batch_decode( snake_case__ , alpha=snake_case__ , beta=snake_case__ , unk_score_offset=snake_case__ , lm_score_boundary=snake_case__ , ) lowerCamelCase_ : str =decoded_processor_out.text lowerCamelCase_ : Tuple =list(snake_case__ ) decoder.reset_params( alpha=snake_case__ , beta=snake_case__ , unk_score_offset=snake_case__ , lm_score_boundary=snake_case__ , ) with get_context("fork" ).Pool() as pool: lowerCamelCase_ : Dict =decoder.decode_beams_batch( snake_case__ , snake_case__ , ) lowerCamelCase_ : Optional[Any] =[d[0][0] for d in decoded_decoder_out] self.assertListEqual(snake_case__ , snake_case__ ) self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , snake_case__ ) lowerCamelCase_ : List[Any] =processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , snake_case__ ) def UpperCAmelCase__ ( self : int ): lowerCamelCase_ : Any =WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) lowerCamelCase_ : List[str] =processor.decoder.model_container[processor.decoder._model_key] lowerCamelCase_ : List[Any] =Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute() lowerCamelCase_ : Dict =os.listdir(snake_case__ ) lowerCamelCase_ : Optional[int] =["alphabet.json", "language_model"] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(snake_case__ , snake_case__ ) def UpperCAmelCase__ ( self : Union[str, Any] ): lowerCamelCase_ : Dict =snapshot_download("hf-internal-testing/processor_with_lm" ) lowerCamelCase_ : Any =WavaVecaProcessorWithLM.from_pretrained(snake_case__ ) lowerCamelCase_ : Optional[int] =processor.decoder.model_container[processor.decoder._model_key] lowerCamelCase_ : Dict =Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute() lowerCamelCase_ : Dict =os.listdir(snake_case__ ) lowerCamelCase_ : List[str] =os.listdir(snake_case__ ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(snake_case__ , snake_case__ ) def UpperCAmelCase__ ( self : Union[str, Any] ): lowerCamelCase_ : Dict =WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) lowerCamelCase_ : Dict =AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" ) lowerCamelCase_ : Any =floats_list((3, 1000) ) lowerCamelCase_ : Union[str, Any] =processor_wavaveca(snake_case__ , return_tensors="np" ) lowerCamelCase_ : str =processor_auto(snake_case__ , return_tensors="np" ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 ) lowerCamelCase_ : Optional[int] =self._get_dummy_logits() lowerCamelCase_ : int =processor_wavaveca.batch_decode(snake_case__ ) lowerCamelCase_ : Dict =processor_auto.batch_decode(snake_case__ ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def UpperCAmelCase__ ( self : List[str] ): lowerCamelCase_ : Optional[int] =self.get_feature_extractor() lowerCamelCase_ : Optional[Any] =self.get_tokenizer() lowerCamelCase_ : Optional[int] =self.get_decoder() lowerCamelCase_ : Optional[Any] =WavaVecaProcessorWithLM(tokenizer=snake_case__ , feature_extractor=snake_case__ , decoder=snake_case__ ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , ) @staticmethod def UpperCAmelCase__ ( snake_case__ : List[Any] , snake_case__ : Any ): lowerCamelCase_ : str =[d[key] for d in offsets] return retrieved_list def UpperCAmelCase__ ( self : int ): lowerCamelCase_ : Dict =WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) lowerCamelCase_ : Union[str, Any] =self._get_dummy_logits()[0] lowerCamelCase_ : Optional[int] =processor.decode(snake_case__ , output_word_offsets=snake_case__ ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue("text" in outputs ) self.assertTrue("word_offsets" in outputs ) self.assertTrue(isinstance(snake_case__ , snake_case__ ) ) self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] ) def UpperCAmelCase__ ( self : Optional[Any] ): lowerCamelCase_ : int =WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) lowerCamelCase_ : Optional[int] =self._get_dummy_logits() lowerCamelCase_ : Tuple =processor.batch_decode(snake_case__ , output_word_offsets=snake_case__ ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue("text" in outputs ) self.assertTrue("word_offsets" in outputs ) self.assertTrue(isinstance(snake_case__ , snake_case__ ) ) self.assertListEqual( [" ".join(self.get_from_offsets(snake_case__ , "word" ) ) for o in outputs["word_offsets"]] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def UpperCAmelCase__ ( self : int ): import torch lowerCamelCase_ : Any =load_dataset("common_voice" , "en" , split="train" , streaming=snake_case__ ) lowerCamelCase_ : int =ds.cast_column("audio" , datasets.Audio(sampling_rate=1_6000 ) ) lowerCamelCase_ : Optional[int] =iter(snake_case__ ) lowerCamelCase_ : int =next(snake_case__ ) lowerCamelCase_ : Optional[int] =AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" ) lowerCamelCase_ : Union[str, Any] =WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train lowerCamelCase_ : Optional[Any] =processor(sample["audio"]["array"] , return_tensors="pt" ).input_values with torch.no_grad(): lowerCamelCase_ : Tuple =model(snake_case__ ).logits.cpu().numpy() lowerCamelCase_ : Tuple =processor.decode(logits[0] , output_word_offsets=snake_case__ ) lowerCamelCase_ : List[str] =model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate lowerCamelCase_ : Optional[int] =[ { "start_time": d["start_offset"] * time_offset, "end_time": d["end_offset"] * time_offset, "word": d["word"], } for d in output["word_offsets"] ] lowerCamelCase_ : str ="WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL" # output words self.assertEqual(" ".join(self.get_from_offsets(snake_case__ , "word" ) ) , snake_case__ ) self.assertEqual(" ".join(self.get_from_offsets(snake_case__ , "word" ) ) , output.text ) # output times lowerCamelCase_ : Any =torch.tensor(self.get_from_offsets(snake_case__ , "start_time" ) ) lowerCamelCase_ : Tuple =torch.tensor(self.get_from_offsets(snake_case__ , "end_time" ) ) # fmt: off lowerCamelCase_ : Dict =torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] ) lowerCamelCase_ : Optional[Any] =torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] ) # fmt: on self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=0.01 ) ) self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=0.01 ) )
144
"""simple docstring""" from __future__ import annotations from math import pi, sqrt def _snake_case ( lowerCamelCase__ : float , lowerCamelCase__ : float ) -> tuple: if inductance <= 0: raise ValueError("Inductance cannot be 0 or negative" ) elif capacitance <= 0: raise ValueError("Capacitance cannot be 0 or negative" ) else: return ( "Resonant frequency", float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ), ) if __name__ == "__main__": import doctest doctest.testmod()
144
1
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger _UpperCAmelCase : Union[str, Any] = "<<<<<<< This should probably be modified because it mentions: " _UpperCAmelCase : List[Any] = "=======\n>>>>>>>\n" _UpperCAmelCase : Optional[int] = [ "TextEncoderConfig", "ByteTextEncoder", "SubwordTextEncoder", "encoder_config", "maybe_build_from_corpus", "manual_dir", ] _UpperCAmelCase : Optional[Any] = [ # (pattern, replacement) # Order is important here for some replacements (R"tfds\.core", R"datasets"), (R"tf\.io\.gfile\.GFile", R"open"), (R"tf\.([\w\d]+)", R"datasets.Value('\1')"), (R"tfds\.features\.Text\(\)", R"datasets.Value('string')"), (R"tfds\.features\.Text\(", R"datasets.Value('string'),"), (R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("), (R"tfds\.features\.FeaturesDict\(", R"dict("), (R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"), (R"tfds\.", R"datasets."), (R"dl_manager\.manual_dir", R"self.config.data_dir"), (R"self\.builder_config", R"self.config"), ] def A ( lowercase ) -> Optional[Any]: '''simple docstring''' return ConvertCommand(args.tfds_path , args.datasets_directory ) class lowercase ( _SCREAMING_SNAKE_CASE ): @staticmethod def __UpperCamelCase ( A_ ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = parser.add_parser( 'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , ) train_parser.add_argument( '--tfds_path' , type=A_ , required=A_ , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , ) train_parser.add_argument( '--datasets_directory' , type=A_ , required=A_ , help='Path to the HuggingFace Datasets folder.' ) train_parser.set_defaults(func=A_ ) def __init__( self , A_ , A_ , *A_ ) -> Any: """simple docstring""" UpperCamelCase = get_logger('datasets-cli/converting' ) UpperCamelCase = tfds_path UpperCamelCase = datasets_directory def __UpperCamelCase ( self ) -> int: """simple docstring""" if os.path.isdir(self._tfds_path ): UpperCamelCase = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): UpperCamelCase = os.path.dirname(self._tfds_path ) else: raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' ) UpperCamelCase = os.path.abspath(self._datasets_directory ) self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) UpperCamelCase = [] UpperCamelCase = [] UpperCamelCase = {} if os.path.isdir(self._tfds_path ): UpperCamelCase = os.listdir(A_ ) else: UpperCamelCase = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(F'''Looking at file {f_name}''' ) UpperCamelCase = os.path.join(A_ , A_ ) UpperCamelCase = os.path.join(A_ , A_ ) if not os.path.isfile(A_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('Skipping file' ) continue with open(A_ , encoding='utf-8' ) as f: UpperCamelCase = f.readlines() UpperCamelCase = [] UpperCamelCase = False UpperCamelCase = False UpperCamelCase = [] for line in lines: UpperCamelCase = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: UpperCamelCase = 'import datasets\n' elif "import tensorflow" in out_line: # order is important here UpperCamelCase = '' continue elif "from absl import logging" in out_line: UpperCamelCase = 'from datasets import logging\n' elif "getLogger" in out_line: UpperCamelCase = out_line.replace('getLogger' , 'get_logger' ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): UpperCamelCase = True UpperCamelCase = list(filter(lambda A_ : e in out_line , A_ ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(A_ ) + '\n' ) out_lines.append(A_ ) out_lines.append(A_ ) continue else: for pattern, replacement in TO_CONVERT: UpperCamelCase = re.sub(A_ , A_ , A_ ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: UpperCamelCase = re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , A_ ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) ) UpperCamelCase = 'from . import ' + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(F'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: UpperCamelCase = True out_lines.append(A_ ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset UpperCamelCase = f_name.replace('.py' , '' ) UpperCamelCase = os.path.join(A_ , A_ ) UpperCamelCase = os.path.join(A_ , A_ ) os.makedirs(A_ , exist_ok=A_ ) self._logger.info(F'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(A_ ) if needs_manual_update: with_manual_update.append(A_ ) with open(A_ , 'w' , encoding='utf-8' ) as f: f.writelines(A_ ) self._logger.info(F'''Converted in {output_file}''' ) for utils_file in utils_files: try: UpperCamelCase = os.path.basename(A_ ) UpperCamelCase = imports_to_builder_map[f_name.replace('.py' , '' )] self._logger.info(F'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(A_ , A_ ) except KeyError: self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
110
import os import sys import unittest _UpperCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path _UpperCAmelCase : Tuple = os.path.join(git_repo_path, "src", "diffusers") class lowercase ( unittest.TestCase ): def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = find_backend(' if not is_torch_available():' ) self.assertEqual(A_ , 'torch' ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") UpperCamelCase = find_backend(' if not (is_torch_available() and is_transformers_available()):' ) self.assertEqual(A_ , 'torch_and_transformers' ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") UpperCamelCase = find_backend( ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' ) self.assertEqual(A_ , 'torch_and_transformers_and_onnx' ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch' , A_ ) self.assertIn('torch_and_transformers' , A_ ) self.assertIn('flax_and_transformers' , A_ ) self.assertIn('torch_and_transformers_and_onnx' , A_ ) # Likewise, we can't assert on the exact content of a key self.assertIn('UNet2DModel' , objects['torch'] ) self.assertIn('FlaxUNet2DConditionModel' , objects['flax'] ) self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'] ) self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'] ) self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'] ) self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'] ) def __UpperCamelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = create_dummy_object('CONSTANT' , '\'torch\'' ) self.assertEqual(A_ , '\nCONSTANT = None\n' ) UpperCamelCase = create_dummy_object('function' , '\'torch\'' ) self.assertEqual( A_ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' ) UpperCamelCase = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n' UpperCamelCase = create_dummy_object('FakeClass' , '\'torch\'' ) self.assertEqual(A_ , A_ ) def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n' UpperCamelCase = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} ) self.assertEqual(dummy_files['torch'] , A_ )
110
1
"""simple docstring""" import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("""0.8.3"""): raise Exception("""requires gluonnlp == 0.8.3""") if version.parse(mx.__version__) != version.parse("""1.5.0"""): raise Exception("""requires mxnet == 1.5.0""") logging.set_verbosity_info() UpperCAmelCase_ : List[str] = logging.get_logger(__name__) UpperCAmelCase_ : Optional[int] = """The Nymphenburg Palace is a beautiful palace in Munich!""" def _A (__a , __a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : str = { '''attention_cell''': '''multi_head''', '''num_layers''': 4, '''units''': 10_24, '''hidden_size''': 7_68, '''max_length''': 5_12, '''num_heads''': 8, '''scaled''': True, '''dropout''': 0.1, '''use_residual''': True, '''embed_size''': 10_24, '''embed_dropout''': 0.1, '''word_embed''': None, '''layer_norm_eps''': 1e-5, '''token_type_vocab_size''': 2, } SCREAMING_SNAKE_CASE_ : str = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py SCREAMING_SNAKE_CASE_ : Dict = BERTEncoder( attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=__a , output_all_encodings=__a , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , __a ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later SCREAMING_SNAKE_CASE_ : str = '''openwebtext_ccnews_stories_books_cased''' # Specify download folder to Gluonnlp's vocab SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(get_home_dir() , '''models''' ) SCREAMING_SNAKE_CASE_ : List[Any] = _load_vocab(__a , __a , __a , cls=__a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = nlp.model.BERTModel( __a , len(__a ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=__a , use_token_type_embed=__a , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=__a , use_decoder=__a , ) original_bort.load_parameters(__a , cast_dtype=__a , ignore_extra=__a ) SCREAMING_SNAKE_CASE_ : List[str] = original_bort._collect_params_with_prefix() # Build our config 🤗 SCREAMING_SNAKE_CASE_ : List[Any] = { '''architectures''': ['''BertForMaskedLM'''], '''attention_probs_dropout_prob''': predefined_args['''dropout'''], '''hidden_act''': '''gelu''', '''hidden_dropout_prob''': predefined_args['''dropout'''], '''hidden_size''': predefined_args['''embed_size'''], '''initializer_range''': 0.02, '''intermediate_size''': predefined_args['''hidden_size'''], '''layer_norm_eps''': predefined_args['''layer_norm_eps'''], '''max_position_embeddings''': predefined_args['''max_length'''], '''model_type''': '''bort''', '''num_attention_heads''': predefined_args['''num_heads'''], '''num_hidden_layers''': predefined_args['''num_layers'''], '''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa '''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa '''vocab_size''': len(__a ), } SCREAMING_SNAKE_CASE_ : List[str] = BertConfig.from_dict(__a ) SCREAMING_SNAKE_CASE_ : str = BertForMaskedLM(__a ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(__a ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(__a , __a ): SCREAMING_SNAKE_CASE_ : Tuple = hf_param.shape SCREAMING_SNAKE_CASE_ : Optional[int] = to_torch(params[gluon_param] ) SCREAMING_SNAKE_CASE_ : Tuple = gluon_param.shape assert ( shape_hf == shape_gluon ), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers' return gluon_param SCREAMING_SNAKE_CASE_ : str = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' ) SCREAMING_SNAKE_CASE_ : Optional[int] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' ) SCREAMING_SNAKE_CASE_ : Optional[int] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) SCREAMING_SNAKE_CASE_ : Dict = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): SCREAMING_SNAKE_CASE_ : BertLayer = hf_bort_model.bert.encoder.layer[i] # self attention SCREAMING_SNAKE_CASE_ : BertSelfAttention = layer.attention.self SCREAMING_SNAKE_CASE_ : Dict = check_and_map_params( self_attn.key.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' ) SCREAMING_SNAKE_CASE_ : str = check_and_map_params( self_attn.key.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' ) SCREAMING_SNAKE_CASE_ : Any = check_and_map_params( self_attn.query.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' ) SCREAMING_SNAKE_CASE_ : str = check_and_map_params( self_attn.query.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' ) SCREAMING_SNAKE_CASE_ : Tuple = check_and_map_params( self_attn.value.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = check_and_map_params( self_attn.value.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' ) # self attention output SCREAMING_SNAKE_CASE_ : BertSelfOutput = layer.attention.output SCREAMING_SNAKE_CASE_ : List[str] = check_and_map_params( self_output.dense.bias , f'encoder.transformer_cells.{i}.proj.bias' ) SCREAMING_SNAKE_CASE_ : List[str] = check_and_map_params( self_output.dense.weight , f'encoder.transformer_cells.{i}.proj.weight' ) SCREAMING_SNAKE_CASE_ : Any = check_and_map_params( self_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.layer_norm.beta' ) SCREAMING_SNAKE_CASE_ : Tuple = check_and_map_params( self_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.layer_norm.gamma' ) # intermediate SCREAMING_SNAKE_CASE_ : BertIntermediate = layer.intermediate SCREAMING_SNAKE_CASE_ : List[Any] = check_and_map_params( intermediate.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_1.bias' ) SCREAMING_SNAKE_CASE_ : Any = check_and_map_params( intermediate.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_1.weight' ) # output SCREAMING_SNAKE_CASE_ : BertOutput = layer.output SCREAMING_SNAKE_CASE_ : Dict = check_and_map_params( bert_output.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_2.bias' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = check_and_map_params( bert_output.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_2.weight' ) SCREAMING_SNAKE_CASE_ : Dict = check_and_map_params( bert_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.ffn.layer_norm.beta' ) SCREAMING_SNAKE_CASE_ : str = check_and_map_params( bert_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models SCREAMING_SNAKE_CASE_ : int = RobertaTokenizer.from_pretrained('''roberta-base''' ) SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.encode_plus(__a )['''input_ids'''] # Get gluon output SCREAMING_SNAKE_CASE_ : List[str] = mx.nd.array([input_ids] ) SCREAMING_SNAKE_CASE_ : int = original_bort(inputs=__a , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(__a ) SCREAMING_SNAKE_CASE_ : List[str] = BertModel.from_pretrained(__a ) hf_bort_model.eval() SCREAMING_SNAKE_CASE_ : Dict = tokenizer.encode_plus(__a , return_tensors='''pt''' ) SCREAMING_SNAKE_CASE_ : List[Any] = hf_bort_model(**__a )[0] SCREAMING_SNAKE_CASE_ : List[Any] = output_gluon[0].asnumpy() SCREAMING_SNAKE_CASE_ : Union[str, Any] = output_hf[0].detach().numpy() SCREAMING_SNAKE_CASE_ : int = np.max(np.abs(hf_layer - gluon_layer ) ).item() SCREAMING_SNAKE_CASE_ : Tuple = np.allclose(__a , __a , atol=1e-3 ) if success: print('''✔️ Both model do output the same tensors''' ) else: print('''❌ Both model do **NOT** output the same tensors''' ) print('''Absolute difference is:''' , __a ) if __name__ == "__main__": UpperCAmelCase_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase_ : Optional[Any] = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
91
import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class A__ ( __UpperCAmelCase ): """simple docstring""" def __lowercase ( self) -> Tuple: '''simple docstring''' a__ : Dict = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(lowercase , 'hidden_sizes')) self.parent.assertTrue(hasattr(lowercase , 'num_attention_heads')) self.parent.assertTrue(hasattr(lowercase , 'num_encoder_blocks')) class A__ : """simple docstring""" def __init__( self , lowercase , lowercase=13 , lowercase=64 , lowercase=3 , lowercase=4 , lowercase=[2, 2, 2, 2] , lowercase=[8, 4, 2, 1] , lowercase=[16, 32, 64, 128] , lowercase=[1, 4, 8, 16] , lowercase=[1, 2, 4, 8] , lowercase=True , lowercase=True , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=3 , lowercase=None , ) -> Tuple: '''simple docstring''' a__ : Optional[Any] = parent a__ : int = batch_size a__ : Tuple = image_size a__ : Union[str, Any] = num_channels a__ : str = num_encoder_blocks a__ : Dict = sr_ratios a__ : Dict = depths a__ : Union[str, Any] = hidden_sizes a__ : str = downsampling_rates a__ : Tuple = num_attention_heads a__ : Optional[Any] = is_training a__ : Union[str, Any] = use_labels a__ : Any = hidden_act a__ : Optional[int] = hidden_dropout_prob a__ : int = attention_probs_dropout_prob a__ : Optional[Any] = initializer_range a__ : Tuple = num_labels a__ : Union[str, Any] = scope def __lowercase ( self) -> Any: '''simple docstring''' a__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) a__ : str = None if self.use_labels: a__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels) a__ : Any = self.get_config() return config, pixel_values, labels def __lowercase ( self) -> Any: '''simple docstring''' return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def __lowercase ( self , lowercase , lowercase , lowercase) -> Dict: '''simple docstring''' a__ : Dict = SegformerModel(config=lowercase) model.to(lowercase) model.eval() a__ : Optional[Any] = model(lowercase) a__ : Optional[Any] = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)) def __lowercase ( self , lowercase , lowercase , lowercase) -> str: '''simple docstring''' a__ : Optional[Any] = self.num_labels a__ : List[str] = SegformerForSemanticSegmentation(lowercase) model.to(lowercase) model.eval() a__ : List[str] = model(lowercase) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)) a__ : int = model(lowercase , labels=lowercase) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)) self.parent.assertGreater(result.loss , 0.0) def __lowercase ( self , lowercase , lowercase , lowercase) -> Optional[int]: '''simple docstring''' a__ : Union[str, Any] = 1 a__ : Optional[int] = SegformerForSemanticSegmentation(config=lowercase) model.to(lowercase) model.eval() a__ : Union[str, Any] = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size)).to(lowercase) a__ : Optional[Any] = model(lowercase , labels=lowercase) self.parent.assertGreater(result.loss , 0.0) def __lowercase ( self) -> int: '''simple docstring''' a__ : Any = self.prepare_config_and_inputs() a__ , a__ , a__ : str = config_and_inputs a__ : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): """simple docstring""" __A : Any = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) __A : List[str] = ( { '''feature-extraction''': SegformerModel, '''image-classification''': SegformerForImageClassification, '''image-segmentation''': SegformerForSemanticSegmentation, } if is_torch_available() else {} ) __A : List[str] = True __A : Any = False __A : Any = False __A : str = False def __lowercase ( self) -> Tuple: '''simple docstring''' a__ : Union[str, Any] = SegformerModelTester(self) a__ : Optional[Any] = SegformerConfigTester(self , config_class=lowercase) def __lowercase ( self) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() def __lowercase ( self) -> Tuple: '''simple docstring''' a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase) def __lowercase ( self) -> Dict: '''simple docstring''' a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*lowercase) def __lowercase ( self) -> Dict: '''simple docstring''' a__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*lowercase) @unittest.skip('SegFormer does not use inputs_embeds') def __lowercase ( self) -> Tuple: '''simple docstring''' pass @unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods') def __lowercase ( self) -> str: '''simple docstring''' pass def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' a__ , a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : List[str] = model_class(lowercase) a__ : Dict = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Optional[int] = [*signature.parameters.keys()] a__ : Union[str, Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowercase) def __lowercase ( self) -> str: '''simple docstring''' a__ , a__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() a__ : Tuple = True for model_class in self.all_model_classes: a__ : str = True a__ : List[str] = False a__ : int = True a__ : List[Any] = model_class(lowercase) model.to(lowercase) model.eval() with torch.no_grad(): a__ : Optional[Any] = model(**self._prepare_for_class(lowercase , lowercase)) a__ : Optional[Any] = outputs.attentions a__ : Dict = sum(self.model_tester.depths) self.assertEqual(len(lowercase) , lowercase) # check that output_attentions also work using config del inputs_dict["output_attentions"] a__ : Dict = True a__ : int = model_class(lowercase) model.to(lowercase) model.eval() with torch.no_grad(): a__ : Optional[int] = model(**self._prepare_for_class(lowercase , lowercase)) a__ : Optional[Any] = outputs.attentions self.assertEqual(len(lowercase) , lowercase) # verify the first attentions (first block, first layer) a__ : Tuple = (self.model_tester.image_size // 4) ** 2 a__ : List[str] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) a__ : str = (self.model_tester.image_size // 32) ** 2 a__ : Optional[int] = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:]) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) a__ : Dict = len(lowercase) # Check attention is always last and order is fine a__ : List[Any] = True a__ : Any = True a__ : Dict = model_class(lowercase) model.to(lowercase) model.eval() with torch.no_grad(): a__ : int = model(**self._prepare_for_class(lowercase , lowercase)) self.assertEqual(out_len + 1 , len(lowercase)) a__ : int = outputs.attentions self.assertEqual(len(lowercase) , lowercase) # verify the first attentions (first block, first layer) a__ : List[Any] = (self.model_tester.image_size // 4) ** 2 a__ : Union[str, Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def __lowercase ( self) -> List[Any]: '''simple docstring''' def check_hidden_states_output(lowercase , lowercase , lowercase): a__ : Optional[Any] = model_class(lowercase) model.to(lowercase) model.eval() with torch.no_grad(): a__ : int = model(**self._prepare_for_class(lowercase , lowercase)) a__ : Union[str, Any] = outputs.hidden_states a__ : Any = self.model_tester.num_encoder_blocks self.assertEqual(len(lowercase) , lowercase) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) a__ , a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : List[str] = True check_hidden_states_output(lowercase , lowercase , lowercase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a__ : int = True check_hidden_states_output(lowercase , lowercase , lowercase) def __lowercase ( self) -> Any: '''simple docstring''' if not self.model_tester.is_training: return a__ , a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() a__ : Tuple = True for model_class in self.all_model_classes: if model_class in get_values(lowercase): continue a__ : Dict = model_class(lowercase) model.to(lowercase) model.train() a__ : str = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase) a__ : Optional[int] = model(**lowercase).loss loss.backward() @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.') def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' pass @slow def __lowercase ( self) -> Tuple: '''simple docstring''' for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Optional[Any] = SegformerModel.from_pretrained(lowercase) self.assertIsNotNone(lowercase) def A_ ( ) -> int: a__ : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch class A__ ( unittest.TestCase ): """simple docstring""" @slow def __lowercase ( self) -> Any: '''simple docstring''' a__ : Dict = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=lowercase , align=lowercase , do_random_crop=lowercase) a__ : int = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to( lowercase) a__ : Optional[int] = prepare_img() a__ : Optional[int] = image_processor(images=lowercase , return_tensors='pt') a__ : List[str] = encoded_inputs.pixel_values.to(lowercase) with torch.no_grad(): a__ : Optional[int] = model(lowercase) a__ : Union[str, Any] = torch.Size((1, model.config.num_labels, 128, 128)) self.assertEqual(outputs.logits.shape , lowercase) a__ : Dict = torch.tensor( [ [[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]], [[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]], [[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]], ]).to(lowercase) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowercase , atol=1e-4)) @slow def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' a__ : Dict = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=lowercase , align=lowercase , do_random_crop=lowercase) a__ : List[str] = SegformerForSemanticSegmentation.from_pretrained( 'nvidia/segformer-b1-finetuned-cityscapes-1024-1024').to(lowercase) a__ : Dict = prepare_img() a__ : Optional[int] = image_processor(images=lowercase , return_tensors='pt') a__ : List[str] = encoded_inputs.pixel_values.to(lowercase) with torch.no_grad(): a__ : Optional[Any] = model(lowercase) a__ : List[Any] = torch.Size((1, model.config.num_labels, 128, 128)) self.assertEqual(outputs.logits.shape , lowercase) a__ : Optional[Any] = torch.tensor( [ [[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]], [[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]], [[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]], ]).to(lowercase) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowercase , atol=1e-1)) @slow def __lowercase ( self) -> Dict: '''simple docstring''' a__ : List[str] = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=lowercase , align=lowercase , do_random_crop=lowercase) a__ : List[str] = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to( lowercase) a__ : Any = prepare_img() a__ : Optional[Any] = image_processor(images=lowercase , return_tensors='pt') a__ : Optional[int] = encoded_inputs.pixel_values.to(lowercase) with torch.no_grad(): a__ : Union[str, Any] = model(lowercase) a__ : int = outputs.logits.detach().cpu() a__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowercase , target_sizes=[(500, 300)]) a__ : Optional[Any] = torch.Size((500, 300)) self.assertEqual(segmentation[0].shape , lowercase) a__ : Any = image_processor.post_process_semantic_segmentation(outputs=lowercase) a__ : Union[str, Any] = torch.Size((128, 128)) self.assertEqual(segmentation[0].shape , lowercase)
99
0
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin __A : Optional[Any] = random.Random() if is_torch_available(): import torch def A_ ( snake_case_ : Any ,snake_case_ : Tuple=1.0 ,snake_case_ : Dict=None ,snake_case_ : str=None ): '''simple docstring''' if rng is None: UpperCamelCase : str = global_rng UpperCamelCase : Tuple = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class lowerCamelCase ( unittest.TestCase ): def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=400 , SCREAMING_SNAKE_CASE_=2000 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=1_6000 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , ): UpperCamelCase : Union[str, Any] = parent UpperCamelCase : Optional[int] = batch_size UpperCamelCase : Tuple = min_seq_length UpperCamelCase : Any = max_seq_length UpperCamelCase : Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCamelCase : Optional[Any] = feature_size UpperCamelCase : Optional[Any] = padding_value UpperCamelCase : str = sampling_rate UpperCamelCase : Tuple = return_attention_mask UpperCamelCase : Union[str, Any] = do_normalize def a_ ( self ): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def a_ ( self , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ): def _flatten(SCREAMING_SNAKE_CASE_ ): return list(itertools.chain(*_lowerCamelCase ) ) if equal_length: UpperCamelCase : List[Any] = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size UpperCamelCase : Tuple = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: UpperCamelCase : int = [np.asarray(_lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class lowerCamelCase ( a__ , unittest.TestCase ): lowercase : Optional[int] = ASTFeatureExtractor def a_ ( self ): UpperCamelCase : Dict = ASTFeatureExtractionTester(self ) def a_ ( self ): # Tests that all call wrap to encode_plus and batch_encode_plus UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCamelCase : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] UpperCamelCase : Any = [np.asarray(_lowerCamelCase ) for speech_input in speech_inputs] # Test not batched input UpperCamelCase : Any = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values UpperCamelCase : Any = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 ) ) # Test batched UpperCamelCase : Union[str, Any] = feat_extract(_lowerCamelCase , padding=_lowerCamelCase , return_tensors="""np""" ).input_values UpperCamelCase : str = feat_extract(_lowerCamelCase , padding=_lowerCamelCase , return_tensors="""np""" ).input_values for enc_seq_a, enc_seq_a in zip(_lowerCamelCase , _lowerCamelCase ): self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. UpperCamelCase : Any = [floats_list((1, x) )[0] for x in (800, 800, 800)] UpperCamelCase : str = np.asarray(_lowerCamelCase ) UpperCamelCase : Dict = feat_extract(_lowerCamelCase , return_tensors="""np""" ).input_values UpperCamelCase : int = feat_extract(_lowerCamelCase , return_tensors="""np""" ).input_values for enc_seq_a, enc_seq_a in zip(_lowerCamelCase , _lowerCamelCase ): self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 ) ) @require_torch def a_ ( self ): import torch UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Optional[int] = np.random.rand(100 ).astype(np.floataa ) UpperCamelCase : Tuple = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: UpperCamelCase : List[Any] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) UpperCamelCase : Optional[Any] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def a_ ( self , SCREAMING_SNAKE_CASE_ ): from datasets import load_dataset UpperCamelCase : int = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech UpperCamelCase : Any = ds.sort("""id""" ).select(range(_lowerCamelCase ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] @require_torch def a_ ( self ): # fmt: off UpperCamelCase : Optional[int] = torch.tensor( [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776, -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133, -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936, -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] ) # fmt: on UpperCamelCase : Optional[Any] = self._load_datasamples(1 ) UpperCamelCase : Any = ASTFeatureExtractor() UpperCamelCase : List[Any] = feature_extractor(_lowerCamelCase , return_tensors="""pt""" ).input_values self.assertEquals(input_values.shape , (1, 1024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , _lowerCamelCase , atol=1e-4 ) )
369
"""simple docstring""" import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCamelCase ( unittest.TestCase ): def a_ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def a_ ( self ): UpperCamelCase : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) UpperCamelCase : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) UpperCamelCase : Dict = """xvjiarui/stable-diffusion-2-inpainting""" UpperCamelCase , UpperCamelCase : List[str] = FlaxStableDiffusionInpaintPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = """Face of a yellow cat, high resolution, sitting on a park bench""" UpperCamelCase : List[str] = jax.random.PRNGKey(0 ) UpperCamelCase : Tuple = 50 UpperCamelCase : Dict = jax.device_count() UpperCamelCase : Optional[int] = num_samples * [prompt] UpperCamelCase : int = num_samples * [init_image] UpperCamelCase : List[Any] = num_samples * [mask_image] UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # shard inputs and rng UpperCamelCase : Optional[int] = replicate(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count() ) UpperCamelCase : str = shard(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = shard(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = shard(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[Any] = pipeline( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , jit=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = output.images.reshape(SCREAMING_SNAKE_CASE_ , 512 , 512 , 3 ) UpperCamelCase : List[Any] = images[0, 253:256, 253:256, -1] UpperCamelCase : List[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) UpperCamelCase : Dict = jnp.array( [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
27
0
import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask lowerCAmelCase : Any = logging.getLogger(__name__) class _A ( __magic_name__): def __init__( self , _SCREAMING_SNAKE_CASE=-1 ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = label_idx def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): """simple docstring""" if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ : int = mode.value SCREAMING_SNAKE_CASE_ : Any = os.path.join(_SCREAMING_SNAKE_CASE , f"{mode}.txt" ) SCREAMING_SNAKE_CASE_ : List[Any] = 1 SCREAMING_SNAKE_CASE_ : Union[str, Any] = [] with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f: SCREAMING_SNAKE_CASE_ : Dict = [] SCREAMING_SNAKE_CASE_ : Any = [] for line in f: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) ) guid_index += 1 SCREAMING_SNAKE_CASE_ : Any = [] SCREAMING_SNAKE_CASE_ : Dict = [] else: SCREAMING_SNAKE_CASE_ : List[str] = line.split(' ' ) words.append(splits[0] ) if len(_SCREAMING_SNAKE_CASE ) > 1: labels.append(splits[self.label_idx].replace('\n' , '' ) ) else: # Examples could have no label for mode = "test" labels.append('O' ) if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) ) return examples def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = 0 for line in test_input_reader: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": writer.write(_SCREAMING_SNAKE_CASE ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: SCREAMING_SNAKE_CASE_ : List[str] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n' writer.write(_SCREAMING_SNAKE_CASE ) else: logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] ) def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ): """simple docstring""" if path: with open(_SCREAMING_SNAKE_CASE , 'r' ) as f: SCREAMING_SNAKE_CASE_ : Tuple = f.read().splitlines() if "O" not in labels: SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['O'] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class _A ( __magic_name__): def __init__( self ): """simple docstring""" super().__init__(label_idx=-2 ) def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ): """simple docstring""" if path: with open(_SCREAMING_SNAKE_CASE , 'r' ) as f: SCREAMING_SNAKE_CASE_ : int = f.read().splitlines() if "O" not in labels: SCREAMING_SNAKE_CASE_ : int = ['O'] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class _A ( __magic_name__): def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): """simple docstring""" if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ : Dict = mode.value SCREAMING_SNAKE_CASE_ : str = os.path.join(_SCREAMING_SNAKE_CASE , f"{mode}.txt" ) SCREAMING_SNAKE_CASE_ : Optional[int] = 1 SCREAMING_SNAKE_CASE_ : Tuple = [] with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f: for sentence in parse_incr(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ : List[str] = [] SCREAMING_SNAKE_CASE_ : List[str] = [] for token in sentence: words.append(token['form'] ) labels.append(token['upos'] ) assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) ) guid_index += 1 return examples def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = 0 for sentence in parse_incr(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ : List[str] = preds_list[example_id] SCREAMING_SNAKE_CASE_ : Any = '' for token in sentence: out += f"{token['form']} ({token['upos']}|{s_p.pop(0 )}) " out += "\n" writer.write(_SCREAMING_SNAKE_CASE ) example_id += 1 def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ): """simple docstring""" if path: with open(_SCREAMING_SNAKE_CASE , 'r' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
253
import os from typing import Dict, List, Tuple, TypeVar, Union lowerCAmelCase : str = TypeVar('T') lowerCAmelCase : Optional[Any] = Union[List[T], Tuple[T, ...]] lowerCAmelCase : str = Union[T, List[T], Dict[str, T]] lowerCAmelCase : Union[str, Any] = Union[str, bytes, os.PathLike]
253
1
def lowerCAmelCase__ ( ): '''simple docstring''' for n in range(1 ,1000000): yield n * (n + 1) // 2 def lowerCAmelCase__ ( lowerCamelCase_ : int): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = 1 lowerCAmelCase__ : Union[str, Any] = 2 while i * i <= n: lowerCAmelCase__ : Optional[Any] = 0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def lowerCAmelCase__ ( ): '''simple docstring''' return next(i for i in triangle_number_generator() if count_divisors(lowerCamelCase_) > 500) if __name__ == "__main__": print(solution())
94
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __snake_case : Any ={ 'configuration_blenderbot_small': [ 'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotSmallConfig', 'BlenderbotSmallOnnxConfig', ], 'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Union[str, Any] =['BlenderbotSmallTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : List[str] =[ 'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotSmallForCausalLM', 'BlenderbotSmallForConditionalGeneration', 'BlenderbotSmallModel', 'BlenderbotSmallPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : str =[ 'TFBlenderbotSmallForConditionalGeneration', 'TFBlenderbotSmallModel', 'TFBlenderbotSmallPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Dict =[ 'FlaxBlenderbotSmallForConditionalGeneration', 'FlaxBlenderbotSmallModel', 'FlaxBlenderbotSmallPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallOnnxConfig, ) from .tokenization_blenderbot_small import BlenderbotSmallTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot_small import ( TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel, ) else: import sys __snake_case : Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
94
1
def lowerCamelCase__ ( _A ): '''simple docstring''' snake_case_ = [int(_A ) for i in ip_va_address.split("." ) if i.isdigit()] return len(_A ) == 4 and all(0 <= int(_A ) <= 254 for octet in octets ) if __name__ == "__main__": lowercase__ : str = input().strip() lowercase__ : str = "valid" if is_ip_va_address_valid(ip) else "invalid" print(f'''{ip} is a {valid_or_invalid} IP v4 address.''')
187
def lowerCamelCase__ ( _A ): '''simple docstring''' if num <= 0: raise ValueError("Input must be a positive integer" ) snake_case_ = [True] * (num + 1) snake_case_ = 2 while p * p <= num: if primes[p]: for i in range(p * p , num + 1 , _A ): snake_case_ = False p += 1 return [prime for prime in range(2 , num + 1 ) if primes[prime]] if __name__ == "__main__": import doctest doctest.testmod() lowercase__ : Tuple = int(input("Enter a positive integer: ").strip()) print(prime_sieve_eratosthenes(user_num))
187
1
'''simple docstring''' import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor lowerCAmelCase: int = logging.getLogger(__name__) lowerCAmelCase: str = 5_0 # max width of layer names lowerCAmelCase: List[Any] = 7_0 # max width of quantizer names def lowerCamelCase__ ( _A ): a : Optional[Any] = parser.add_argument_group('quant_trainer arguments' ) group.add_argument('--wprec' , type=_A , default=8 , help='weight precision' ) group.add_argument('--aprec' , type=_A , default=8 , help='activation precision' ) group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' ) group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' ) group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' ) group.add_argument('--quant-disable-keyword' , type=_A , nargs='+' , help='disable quantizers by keyword' ) group.add_argument('--quant-disable-layer-module' , type=_A , help='disable quantizers by keyword under layer.' ) group.add_argument('--quant-enable-layer-module' , type=_A , help='enable quantizers by keyword under layer' ) group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' ) group.add_argument('--percentile' , default=_A , type=_A , help='percentile for PercentileCalibrator' ) group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' ) group.add_argument('--clip-gelu' , metavar='N' , type=_A , help='clip gelu output maximum value to N' ) group.add_argument( '--recalibrate-weights' , action='store_true' , help=( 'recalibrate weight amaxes by taking the max of the weights.' ' amaxes will be computed with the current quantization granularity (axis).' ) , ) def lowerCamelCase__ ( _A ): if args.calibrator == "max": a : List[Any] = 'max' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('Specify --percentile when using percentile calibrator' ) a : Any = 'histogram' elif args.calibrator == "mse": a : Dict = 'histogram' else: raise ValueError(f"""Invalid calibrator {args.calibrator}""" ) a : Optional[Any] = QuantDescriptor(num_bits=args.aprec , calib_method=_A ) a : Any = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(_A ) quant_nn.QuantLinear.set_default_quant_desc_weight(_A ) def lowerCamelCase__ ( _A , _A , _A=False , _A=False ): logger.info('Configuring Model for Quantization' ) logger.info(f"""using quantization package {pytorch_quantization.__file__}""" ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(_A , ['embeddings'] , which='weight' , _disabled=_A ) if args.quant_disable: set_quantizer_by_name(_A , [''] , _disabled=_A ) if args.quant_disable_keyword: set_quantizer_by_name(_A , args.quant_disable_keyword , _disabled=_A ) if args.quant_disable_layer_module: set_quantizer_by_name(_A , [r'layer.\d+.' + args.quant_disable_layer_module] , _disabled=_A ) if args.quant_enable_layer_module: set_quantizer_by_name(_A , [r'layer.\d+.' + args.quant_enable_layer_module] , _disabled=_A ) if args.recalibrate_weights: recalibrate_weights(_A ) if args.fuse_qkv: fuse_qkv(_A , _A ) if args.clip_gelu: clip_gelu(_A , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(_A ) def lowerCamelCase__ ( _A ): logger.info('Enabling Calibration' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(f"""{name:80}: {module}""" ) def lowerCamelCase__ ( _A , _A ): logger.info('Loading calibrated amax' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('percentile' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(_A ) def lowerCamelCase__ ( _A , _A ): def fusea(_A , _A , _A ): for mod in [qq, qk, qv]: if not hasattr(_A , '_amax' ): print(' WARNING: NO AMAX BUFFER' ) return a : Optional[Any] = qq._amax.detach().item() a : int = qk._amax.detach().item() a : Optional[int] = qv._amax.detach().item() a : List[Any] = max(_A , _A , _A ) qq._amax.fill_(_A ) qk._amax.fill_(_A ) qv._amax.fill_(_A ) logger.info(f""" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" ) for name, mod in model.named_modules(): if name.endswith('.attention.self' ): logger.info(f"""FUSE_QKV: {name:{name_width}}""" ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def lowerCamelCase__ ( _A , _A ): for name, mod in model.named_modules(): if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ): a : Dict = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=_A ) a : Dict = mod._input_quantizer._amax.data.detach().item() logger.info(f"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" ) def lowerCamelCase__ ( _A ): for name, mod in model.named_modules(): if hasattr(_A , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None: a : List[Any] = mod.weight.shape[0] a : Optional[Any] = mod._weight_quantizer._amax.detach() a : int = torch.ones(_A , dtype=amax.dtype , device=amax.device ) * amax print(f"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" ) def lowerCamelCase__ ( _A ): for name, mod in model.named_modules(): if hasattr(_A , '_weight_quantizer' ): if not hasattr(mod.weight_quantizer , '_amax' ): print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' ) continue # determine which axes to reduce across # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3) a : Any = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) a : Union[str, Any] = set(range(len(mod.weight.size() ) ) ) - axis_set a : Any = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_A , keepdims=_A ).detach() logger.info(f"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" ) a : Optional[int] = amax def lowerCamelCase__ ( _A , _A=25 , _A=180 , _A=None ): if ignore is None: a : Tuple = [] elif not isinstance(_A , _A ): a : Union[str, Any] = [ignore] a : List[Any] = 0 for name, mod in model.named_modules(): if not hasattr(_A , 'weight' ): continue a : Optional[int] = max(_A , len(_A ) ) for name, mod in model.named_modules(): a : Tuple = getattr(_A , '_input_quantizer' , _A ) a : int = getattr(_A , '_weight_quantizer' , _A ) if not hasattr(_A , 'weight' ): continue if type(_A ) in ignore: continue if [True for s in ignore if type(_A ) is str and s in name]: continue a : int = f"""Act:{input_q.extra_repr()}""" a : Optional[Any] = f"""Wgt:{weight_q.extra_repr()}""" a : Optional[Any] = f"""{name:{name_width}} {act_str} {wgt_str}""" if len(_A ) <= line_width: logger.info(_A ) else: logger.info(f"""{name:{name_width}} {act_str}""" ) logger.info(f"""{" ":{name_width}} {wgt_str}""" ) def lowerCamelCase__ ( _A ): a : str = 0 for name, mod in model.named_modules(): if isinstance(_A , pytorch_quantization.nn.TensorQuantizer ): print(f"""{name:80} {mod}""" ) count += 1 print(f"""{count} TensorQuantizers found in model""" ) def lowerCamelCase__ ( _A , _A , _A , _A , _A ): a : Any = getattr(_A , _A , _A ) if quantizer_mod is not None: assert hasattr(_A , _A ) setattr(_A , _A , _A ) else: logger.warning(f"""{name} has no {quantizer}""" ) def lowerCamelCase__ ( _A , _A , _A="both" , **_A ): a : Optional[int] = f"""Warning: changing {which} quantizers of {name:{qname_width}}""" for k, v in kwargs.items(): s += f""" {k}={v}""" if which in ["input", "both"]: set_quantizer(_A , _A , '_input_quantizer' , _A , _A ) if which in ["weight", "both"]: set_quantizer(_A , _A , '_weight_quantizer' , _A , _A ) logger.info(_A ) def lowerCamelCase__ ( _A , _A , **_A ): for name, mod in model.named_modules(): if hasattr(_A , '_input_quantizer' ) or hasattr(_A , '_weight_quantizer' ): for n in names: if re.search(_A , _A ): set_quantizers(_A , _A , **_A ) elif name.endswith('_quantizer' ): for n in names: if re.search(_A , _A ): a : Optional[int] = f"""Warning: changing {name:{name_width}}""" for k, v in kwargs.items(): s += f""" {k}={v}""" setattr(_A , _A , _A ) logger.info(_A )
96
'''simple docstring''' # Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def lowerCamelCase__ ( _A , _A , _A=0 ): # Format the message. if name is None: a : Tuple = None else: a : Dict = '.' * max(0 , spaces - 2 ) + '# {:' + str(50 - spaces ) + 's}' a : Tuple = fmt.format(_A ) # Print and recurse (if needed). if isinstance(_A , _A ): if msg is not None: print(_A ) for k in val.keys(): recursive_print(_A , val[k] , spaces + 2 ) elif isinstance(_A , torch.Tensor ): print(_A , ':' , val.size() ) else: print(_A , ':' , _A ) def lowerCamelCase__ ( _A , _A , _A , _A , _A ): # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :] # for compatibility with later versions of NVIDIA Megatron-LM. # The inverse operation is performed inside Megatron-LM to read checkpoints: # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209 # If param is the weight tensor of the self-attention block, the returned tensor # will have to be transposed one more time to be read by HuggingFace GPT2. a : str = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] a : List[Any] = (num_heads, hidden_size, num_splits) + input_shape[1:] a : int = param.view(*_A ) a : List[str] = param.transpose(0 , 2 ) a : Union[str, Any] = param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] a : Union[str, Any] = (num_heads, num_splits, hidden_size) + input_shape[1:] a : List[str] = param.view(*_A ) a : Union[str, Any] = param.transpose(0 , 1 ).contiguous() a : List[Any] = param.view(*_A ) return param def lowerCamelCase__ ( _A , _A , _A ): # The converted output model. a : Optional[Any] = {} # old versions did not store training args a : Dict = input_state_dict.get('args' , _A ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) a : Union[str, Any] = ds_args.padded_vocab_size a : str = ds_args.max_position_embeddings a : Dict = ds_args.hidden_size a : Union[str, Any] = ds_args.num_layers a : Dict = ds_args.num_attention_heads a : int = ds_args.ffn_hidden_size # pprint(config) # The number of heads. a : Any = config.n_head # The hidden_size per head. a : Tuple = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): a : Any = input_state_dict['checkpoint_version'] else: a : Any = 0.0 # The model. a : Optional[int] = input_state_dict['model'] # The language model. a : Optional[Any] = model['language_model'] # The embeddings. a : List[str] = lm['embedding'] # The word embeddings. a : List[Any] = embeddings['word_embeddings']['weight'] # Truncate the embedding table to vocab_size rows. a : Dict = word_embeddings[: config.vocab_size, :] a : int = word_embeddings # The position embeddings. a : Tuple = embeddings['position_embeddings']['weight'] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] a : List[str] = pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( f"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" ) # Store the position embeddings. a : Optional[Any] = pos_embeddings # The transformer. a : Union[str, Any] = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder'] # The regex to extract layer names. a : List[Any] = re.compile(r'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' ) # The simple map of names for "automated" rules. a : Optional[Any] = { 'attention.dense': '.attn.c_proj.', 'self_attention.dense': '.attn.c_proj.', 'mlp.dense_h_to_4h': '.mlp.c_fc.', 'mlp.dense_4h_to_h': '.mlp.c_proj.', } # Extract the layers. for key, val in transformer.items(): # Match the name. a : Tuple = layer_re.match(_A ) # Stop if that's not a layer if m is None: break # The index of the layer. a : Union[str, Any] = int(m.group(1 ) ) # The name of the operation. a : Optional[int] = m.group(2 ) # Is it a weight or a bias? a : Optional[int] = m.group(3 ) # The name of the layer. a : Any = f"""transformer.h.{layer_idx}""" # For layernorm(s), simply store the layer norm. if op_name.endswith('layernorm' ): a : str = 'ln_1' if op_name.startswith('input' ) else 'ln_2' a : Tuple = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. a : Dict = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , _A , _A ) a : Optional[Any] = causal_mask # Insert a "dummy" tensor for masked_bias. a : List[Any] = torch.tensor(-1E4 , dtype=torch.floataa ) a : List[str] = masked_bias a : Union[str, Any] = fix_query_key_value_ordering(_A , _A , 3 , _A , _A ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. a : int = out_val.transpose(0 , 1 ).contiguous() # Store. a : int = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": a : str = fix_query_key_value_ordering(_A , _A , 3 , _A , _A ) # Store. No change of shape. a : List[str] = out_val # Transpose the weights. elif weight_or_bias == "weight": a : Tuple = megatron_to_transformers[op_name] a : List[str] = val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": a : Dict = megatron_to_transformers[op_name] a : Optional[Any] = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. a : str = transformer['final_layernorm.weight'] a : List[str] = transformer['final_layernorm.bias'] # For LM head, transformers' wants the matrix to weight embeddings. a : Optional[int] = word_embeddings # It should be done! return output_state_dict def lowerCamelCase__ ( ): # Create the argument parser. a : Dict = argparse.ArgumentParser() parser.add_argument('--print-checkpoint-structure' , action='store_true' ) parser.add_argument( 'path_to_checkpoint' , type=_A , help='Path to the checkpoint file (.zip archive or direct .pt file)' , ) parser.add_argument( '--config_file' , default='' , type=_A , help='An optional config json file describing the pre-trained model.' , ) a : Union[str, Any] = parser.parse_args() # Extract the basename. a : Optional[Any] = os.path.dirname(args.path_to_checkpoint ) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(f"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" ) if args.path_to_checkpoint.endswith('.zip' ): with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint: with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict: a : Union[str, Any] = torch.load(_A , map_location='cpu' ) else: a : Any = torch.load(args.path_to_checkpoint , map_location='cpu' ) a : List[Any] = input_state_dict.get('args' , _A ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: a : int = 'gelu_fast' elif ds_args.openai_gelu: a : Dict = 'gelu_new' else: a : Any = 'gelu' else: # in the very early days this used to be "gelu_new" a : Any = 'gelu_new' # Spell out all parameters in case the defaults change. a : Tuple = GPTaConfig( vocab_size=5_0257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=_A , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=_A , summary_activation=_A , summary_proj_to_labels=_A , summary_first_dropout=0.1 , scale_attn_weights=_A , use_cache=_A , bos_token_id=5_0256 , eos_token_id=5_0256 , ) else: a : str = GPTaConfig.from_json_file(args.config_file ) a : Any = ['GPT2LMHeadModel'] # Convert. print('Converting' ) a : Union[str, Any] = convert_megatron_checkpoint(_A , _A , _A ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(_A , _A ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: a : Union[str, Any] = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": a : Tuple = 'gpt2' elif tokenizer_type == "PretrainedFromHF": a : List[str] = ds_args.tokenizer_name_or_path else: raise ValueError(f"""Unrecognized tokenizer_type {tokenizer_type}""" ) else: a : Optional[Any] = 'gpt2' a : Tuple = AutoTokenizer.from_pretrained(_A ) a : str = type(_A ).__name__ a : List[str] = tokenizer_class # Store the config to file. print('Saving config' ) config.save_pretrained(_A ) # Save tokenizer based on args print(f"""Adding {tokenizer_class} tokenizer files""" ) tokenizer.save_pretrained(_A ) # Store the state_dict to file. a : Optional[int] = os.path.join(_A , 'pytorch_model.bin' ) print(f"""Saving checkpoint to \"{output_checkpoint_file}\"""" ) torch.save(_A , _A ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
96
1
import logging import os import threading import time try: import warnings except ImportError: lowerCAmelCase = None try: import msvcrt except ImportError: lowerCAmelCase = None try: import fcntl except ImportError: lowerCAmelCase = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: lowerCAmelCase = OSError # Data # ------------------------------------------------ lowerCAmelCase = [ 'Timeout', 'BaseFileLock', 'WindowsFileLock', 'UnixFileLock', 'SoftFileLock', 'FileLock', ] lowerCAmelCase = '3.0.12' lowerCAmelCase = None def _a ( ): """simple docstring""" global _logger lowercase__ = _logger or logging.getLogger(__name__ ) return _logger class _a ( UpperCamelCase__ ): def __init__( self: List[Any] , UpperCamelCase_: Optional[int] ) -> Optional[Any]: """simple docstring""" lowercase__ = lock_file return None def __str__( self: List[Any] ) -> Any: """simple docstring""" lowercase__ = f'The file lock \'{self.lock_file}\' could not be acquired.' return temp class _a : def __init__( self: List[Any] , UpperCamelCase_: Any ) -> Optional[int]: """simple docstring""" lowercase__ = lock return None def __enter__( self: List[Any] ) -> Optional[Any]: """simple docstring""" return self.lock def __exit__( self: Optional[int] , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Optional[int] ) -> Any: """simple docstring""" self.lock.release() return None class _a : def __init__( self: Optional[Any] , UpperCamelCase_: str , UpperCamelCase_: Tuple=-1 , UpperCamelCase_: List[Any]=None ) -> Dict: """simple docstring""" lowercase__ = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long lowercase__ = self.hash_filename_if_too_long(UpperCamelCase_ , UpperCamelCase_ ) # The path to the lock file. lowercase__ = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. lowercase__ = None # The default timeout value. lowercase__ = timeout # We use this lock primarily for the lock counter. lowercase__ = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. lowercase__ = 0 return None @property def lowerCamelCase_ ( self: Tuple ) -> Union[str, Any]: """simple docstring""" return self._lock_file @property def lowerCamelCase_ ( self: int ) -> str: """simple docstring""" return self._timeout @timeout.setter def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: Dict ) -> str: """simple docstring""" lowercase__ = float(UpperCamelCase_ ) return None def lowerCamelCase_ ( self: List[str] ) -> Any: """simple docstring""" raise NotImplementedError() def lowerCamelCase_ ( self: Tuple ) -> Optional[int]: """simple docstring""" raise NotImplementedError() @property def lowerCamelCase_ ( self: Dict ) -> int: """simple docstring""" return self._lock_file_fd is not None def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: Union[str, Any]=None , UpperCamelCase_: Optional[Any]=0.05 ) -> str: """simple docstring""" if timeout is None: lowercase__ = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. with self._thread_lock: self._lock_counter += 1 lowercase__ = id(self ) lowercase__ = self._lock_file lowercase__ = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}' ) self._acquire() if self.is_locked: logger().debug(f'Lock {lock_id} acquired on {lock_filename}' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}' ) raise Timeout(self._lock_file ) else: logger().debug( f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...' ) time.sleep(UpperCamelCase_ ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: lowercase__ = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def lowerCamelCase_ ( self: Any , UpperCamelCase_: int=False ) -> int: """simple docstring""" with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: lowercase__ = id(self ) lowercase__ = self._lock_file logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}' ) self._release() lowercase__ = 0 logger().debug(f'Lock {lock_id} released on {lock_filename}' ) return None def __enter__( self: Optional[int] ) -> Tuple: """simple docstring""" self.acquire() return self def __exit__( self: str , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] ) -> Any: """simple docstring""" self.release() return None def __del__( self: Any ) -> Union[str, Any]: """simple docstring""" self.release(force=UpperCamelCase_ ) return None def lowerCamelCase_ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: int ) -> str: """simple docstring""" lowercase__ = os.path.basename(UpperCamelCase_ ) if len(UpperCamelCase_ ) > max_length and max_length > 0: lowercase__ = os.path.dirname(UpperCamelCase_ ) lowercase__ = str(hash(UpperCamelCase_ ) ) lowercase__ = filename[: max_length - len(UpperCamelCase_ ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(UpperCamelCase_ , UpperCamelCase_ ) else: return path class _a ( UpperCamelCase__ ): def __init__( self: Union[str, Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Tuple=-1 , UpperCamelCase_: List[Any]=None ) -> Union[str, Any]: """simple docstring""" from .file_utils import relative_to_absolute_path super().__init__(UpperCamelCase_ , timeout=UpperCamelCase_ , max_filename_length=UpperCamelCase_ ) lowercase__ = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" lowercase__ = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: lowercase__ = os.open(self._lock_file , UpperCamelCase_ ) except OSError: pass else: try: msvcrt.locking(UpperCamelCase_ , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(UpperCamelCase_ ) else: lowercase__ = fd return None def lowerCamelCase_ ( self: str ) -> Optional[int]: """simple docstring""" lowercase__ = self._lock_file_fd lowercase__ = None msvcrt.locking(UpperCamelCase_ , msvcrt.LK_UNLCK , 1 ) os.close(UpperCamelCase_ ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. except OSError: pass return None class _a ( UpperCamelCase__ ): def __init__( self: str , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[int]=-1 , UpperCamelCase_: Tuple=None ) -> str: """simple docstring""" lowercase__ = os.statvfs(os.path.dirname(UpperCamelCase_ ) ).f_namemax super().__init__(UpperCamelCase_ , timeout=UpperCamelCase_ , max_filename_length=UpperCamelCase_ ) def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[int]: """simple docstring""" lowercase__ = os.O_RDWR | os.O_CREAT | os.O_TRUNC lowercase__ = os.open(self._lock_file , UpperCamelCase_ ) try: fcntl.flock(UpperCamelCase_ , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(UpperCamelCase_ ) else: lowercase__ = fd return None def lowerCamelCase_ ( self: Optional[Any] ) -> Any: """simple docstring""" lowercase__ = self._lock_file_fd lowercase__ = None fcntl.flock(UpperCamelCase_ , fcntl.LOCK_UN ) os.close(UpperCamelCase_ ) return None class _a ( UpperCamelCase__ ): def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[int]: """simple docstring""" lowercase__ = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: lowercase__ = os.open(self._lock_file , UpperCamelCase_ ) except OSError: pass else: lowercase__ = fd return None def lowerCamelCase_ ( self: List[Any] ) -> str: """simple docstring""" os.close(self._lock_file_fd ) lowercase__ = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None lowerCAmelCase = None if msvcrt: lowerCAmelCase = WindowsFileLock elif fcntl: lowerCAmelCase = UnixFileLock else: lowerCAmelCase = SoftFileLock if warnings is not None: warnings.warn('only soft file lock is available')
110
import socket def _a ( ): """simple docstring""" lowercase__ = socket.socket(socket.AF_INET , socket.SOCK_STREAM ) lowercase__ = socket.gethostname() lowercase__ = 1_23_12 sock.connect((host, port) ) sock.send(B'''Hello server!''' ) with open('''Received_file''' , '''wb''' ) as out_file: print('''File opened''' ) print('''Receiving data...''' ) while True: lowercase__ = sock.recv(10_24 ) if not data: break out_file.write(SCREAMING_SNAKE_CASE ) print('''Successfully received the file''' ) sock.close() print('''Connection closed''' ) if __name__ == "__main__": main()
110
1
from math import factorial def _snake_case( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> int: '''simple docstring''' if n < k or k < 0: raise ValueError('Please enter positive integers for n and k where n >= k' ) return factorial(SCREAMING_SNAKE_CASE__ ) // (factorial(SCREAMING_SNAKE_CASE__ ) * factorial(n - k )) if __name__ == "__main__": print( "The number of five-card hands possible from a standard", f"""fifty-two card deck is: {combinations(52, 5)}\n""", ) print( "If a class of 40 students must be arranged into groups of", f"""4 for group projects, there are {combinations(40, 4)} ways""", "to arrange them.\n", ) print( "If 10 teams are competing in a Formula One race, there", f"""are {combinations(10, 3)} ways that first, second and""", "third place can be awarded.", )
282
from collections.abc import Sequence def _snake_case( SCREAMING_SNAKE_CASE__ : Sequence[int] | None = None ) -> int: '''simple docstring''' if nums is None or not nums: raise ValueError('Input sequence should not be empty' ) A__ = nums[0] for i in range(1 , len(SCREAMING_SNAKE_CASE__ ) ): A__ = nums[i] A__ = max(SCREAMING_SNAKE_CASE__ , ans + num , SCREAMING_SNAKE_CASE__ ) return ans if __name__ == "__main__": import doctest doctest.testmod() # Try on a sample input from the user lowercase_ = int(input("Enter number of elements : ").strip()) lowercase_ = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n] print(max_subsequence_sum(array))
282
1
import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def _snake_case ( lowerCAmelCase : Dict ): """simple docstring""" if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False def _snake_case ( lowerCAmelCase : str ): """simple docstring""" for char in word: SCREAMING_SNAKE_CASE_ : Any = ord(lowerCAmelCase ) if not _is_chinese_char(lowerCAmelCase ): return 0 return 1 def _snake_case ( lowerCAmelCase : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = set() for token in tokens: SCREAMING_SNAKE_CASE_ : int = len(lowerCAmelCase ) > 1 and is_chinese(lowerCAmelCase ) if chinese_word: word_set.add(lowerCAmelCase ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(lowerCAmelCase ) return word_list def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : set() ): """simple docstring""" if not chinese_word_set: return bert_tokens SCREAMING_SNAKE_CASE_ : str = max([len(lowerCAmelCase ) for w in chinese_word_set] ) SCREAMING_SNAKE_CASE_ : int = bert_tokens SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = 0, len(lowerCAmelCase ) while start < end: SCREAMING_SNAKE_CASE_ : Optional[int] = True if is_chinese(bert_word[start] ): SCREAMING_SNAKE_CASE_ : List[str] = min(end - start , lowerCAmelCase ) for i in range(lowerCAmelCase , 1 , -1 ): SCREAMING_SNAKE_CASE_ : Tuple = "".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): SCREAMING_SNAKE_CASE_ : Optional[int] = "##" + bert_word[j] SCREAMING_SNAKE_CASE_ : int = start + i SCREAMING_SNAKE_CASE_ : Optional[Any] = False break if single_word: start += 1 return bert_word def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : LTP , lowerCAmelCase : BertTokenizer ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = [] for i in range(0 , len(lowerCAmelCase ) , 1_0_0 ): SCREAMING_SNAKE_CASE_ : Optional[int] = ltp_tokenizer.seg(lines[i : i + 1_0_0] )[0] SCREAMING_SNAKE_CASE_ : List[Any] = [get_chinese_word(lowerCAmelCase ) for r in res] ltp_res.extend(lowerCAmelCase ) assert len(lowerCAmelCase ) == len(lowerCAmelCase ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = [] for i in range(0 , len(lowerCAmelCase ) , 1_0_0 ): SCREAMING_SNAKE_CASE_ : str = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=lowerCAmelCase , truncation=lowerCAmelCase , max_length=5_1_2 ) bert_res.extend(res["input_ids"] ) assert len(lowerCAmelCase ) == len(lowerCAmelCase ) SCREAMING_SNAKE_CASE_ : int = [] for input_ids, chinese_word in zip(lowerCAmelCase , lowerCAmelCase ): SCREAMING_SNAKE_CASE_ : str = [] for id in input_ids: SCREAMING_SNAKE_CASE_ : Tuple = bert_tokenizer._convert_id_to_token(lowerCAmelCase ) input_tokens.append(lowerCAmelCase ) SCREAMING_SNAKE_CASE_ : Any = add_sub_symbol(lowerCAmelCase , lowerCAmelCase ) SCREAMING_SNAKE_CASE_ : Tuple = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. for i, token in enumerate(lowerCAmelCase ): if token[:2] == "##": SCREAMING_SNAKE_CASE_ : List[Any] = token[2:] # save chinese tokens' pos if len(lowerCAmelCase ) == 1 and _is_chinese_char(ord(lowerCAmelCase ) ): ref_id.append(lowerCAmelCase ) ref_ids.append(lowerCAmelCase ) assert len(lowerCAmelCase ) == len(lowerCAmelCase ) return ref_ids def _snake_case ( lowerCAmelCase : Optional[int] ): """simple docstring""" with open(args.file_name , "r" , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE_ : List[Any] = f.readlines() SCREAMING_SNAKE_CASE_ : int = [line.strip() for line in data if len(lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' SCREAMING_SNAKE_CASE_ : Union[str, Any] = LTP(args.ltp ) # faster in GPU device SCREAMING_SNAKE_CASE_ : Tuple = BertTokenizer.from_pretrained(args.bert ) SCREAMING_SNAKE_CASE_ : List[str] = prepare_ref(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) with open(args.save_path , "w" , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE_ : Optional[int] = [json.dumps(lowerCAmelCase ) + "\n" for ref in ref_ids] f.writelines(lowerCAmelCase ) if __name__ == "__main__": __lowerCamelCase : Any = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''' ) parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''') parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''') __lowerCamelCase : int = parser.parse_args() main(args)
18
'''simple docstring''' # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowerCamelCase (*_SCREAMING_SNAKE_CASE : int ): with open(_SCREAMING_SNAKE_CASE , 'r' ) as fh: fcntl.flock(_SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*_SCREAMING_SNAKE_CASE ) finally: fcntl.flock(_SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) __lowercase : Dict = int(os.environ['LOCAL_RANK']) torch.cuda.set_device(local_rank) __lowercase : Tuple = torch.device('cuda', local_rank) __lowercase : Optional[int] = socket.gethostname() __lowercase : List[str] = f'''[{hostname}-{local_rank}]''' try: # test distributed dist.init_process_group('nccl') dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __lowercase : str = dist.get_rank() __lowercase : Union[str, Any] = dist.get_world_size() printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''') dist.barrier() if rank == 0: printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''') except Exception: printflock(f'''{gpu} is broken''') raise
27
0
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __A = 16 __A = 32 def lowercase_ ( _lowerCamelCase: Accelerator , _lowerCamelCase: int = 16 ) -> Dict: '''simple docstring''' __lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("bert-base-cased" ) __lowerCamelCase : List[Any] = load_dataset("glue" , "mrpc" ) def tokenize_function(_lowerCamelCase: Any ): # max_length=None => use the model max length (it's actually the default) __lowerCamelCase : Tuple = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCamelCase , max_length=_lowerCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowerCamelCase : Optional[int] = datasets.map( _lowerCamelCase , batched=_lowerCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCamelCase : Dict = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(_lowerCamelCase: Union[str, Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. __lowerCamelCase : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowerCamelCase : str = 16 elif accelerator.mixed_precision != "no": __lowerCamelCase : Tuple = 8 else: __lowerCamelCase : Dict = None return tokenizer.pad( _lowerCamelCase , padding="longest" , max_length=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_tensors="pt" , ) # Instantiate dataloaders. __lowerCamelCase : List[Any] = DataLoader( tokenized_datasets["train"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase ) __lowerCamelCase : Optional[Any] = DataLoader( tokenized_datasets["validation"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __A = mocked_dataloaders # noqa: F811 def lowercase_ ( _lowerCamelCase: List[Any] , _lowerCamelCase: Any ) -> Any: '''simple docstring''' if os.environ.get("TESTING_MOCKED_DATALOADERS" , _lowerCamelCase ) == "1": __lowerCamelCase : Dict = 2 # Initialize accelerator __lowerCamelCase : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCamelCase : Dict = config["lr"] __lowerCamelCase : Union[str, Any] = int(config["num_epochs"] ) __lowerCamelCase : List[Any] = int(config["seed"] ) __lowerCamelCase : str = int(config["batch_size"] ) __lowerCamelCase : str = evaluate.load("glue" , "mrpc" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=_lowerCamelCase ) def inner_training_loop(_lowerCamelCase: str ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(_lowerCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCamelCase : Dict = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCamelCase : List[Any] = model.to(accelerator.device ) # Instantiate optimizer __lowerCamelCase : Any = AdamW(params=model.parameters() , lr=_lowerCamelCase ) __lowerCamelCase : Optional[Any] = get_dataloaders(_lowerCamelCase , _lowerCamelCase ) # Instantiate scheduler __lowerCamelCase : Union[str, Any] = get_linear_schedule_with_warmup( optimizer=_lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCamelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowerCamelCase : int = accelerator.prepare( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Now we train the model for epoch in range(_lowerCamelCase ): model.train() for step, batch in enumerate(_lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) __lowerCamelCase : List[Any] = model(**_lowerCamelCase ) __lowerCamelCase : int = outputs.loss accelerator.backward(_lowerCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __lowerCamelCase : List[Any] = model(**_lowerCamelCase ) __lowerCamelCase : Any = outputs.logits.argmax(dim=-1 ) __lowerCamelCase : int = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=_lowerCamelCase , references=_lowerCamelCase , ) __lowerCamelCase : int = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , _lowerCamelCase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def lowercase_ ( ) -> str: '''simple docstring''' __lowerCamelCase : Any = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=_lowerCamelCase , default=_lowerCamelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) __lowerCamelCase : int = parser.parse_args() __lowerCamelCase : str = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(_lowerCamelCase , _lowerCamelCase ) if __name__ == "__main__": main()
353
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings __A = R''' [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: title_sep (`str`, *optional*, defaults to `" / "`): Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`]. doc_sep (`str`, *optional*, defaults to `" // "`): Separator inserted between the text of the retrieved document and the original input when calling [`RagRetriever`]. n_docs (`int`, *optional*, defaults to 5): Number of documents to retrieve. max_combined_length (`int`, *optional*, defaults to 300): Max length of contextualized input returned by [`~RagRetriever.__call__`]. retrieval_vector_size (`int`, *optional*, defaults to 768): Dimensionality of the document embeddings indexed by [`RagRetriever`]. retrieval_batch_size (`int`, *optional*, defaults to 8): Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated [`RagRetriever`]. dataset (`str`, *optional*, defaults to `"wiki_dpr"`): A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids using `datasets.list_datasets()`). dataset_split (`str`, *optional*, defaults to `"train"`) Which split of the `dataset` to load. index_name (`str`, *optional*, defaults to `"compressed"`) The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and `"compressed"`. index_path (`str`, *optional*) The path to the serialized faiss index on disk. passages_path (`str`, *optional*): A path to text passages compatible with the faiss index. Required if using [`~models.rag.retrieval_rag.LegacyIndex`] use_dummy_dataset (`bool`, *optional*, defaults to `False`) Whether to load a "dummy" variant of the dataset specified by `dataset`. label_smoothing (`float`, *optional*, defaults to 0.0): Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed. do_marginalize (`bool`, *optional*, defaults to `False`): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. reduce_loss (`bool`, *optional*, defaults to `False`): Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation. do_deduplication (`bool`, *optional*, defaults to `True`): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with distributed backend. exclude_bos_score (`bool`, *optional*, defaults to `False`): Whether or not to disregard the BOS token when computing the loss. output_retrieved(`bool`, *optional*, defaults to `False`): If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and `context_attention_mask` are returned. See returned tensors for more detail. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. ''' @add_start_docstrings(a__ ) class _snake_case ( a__ ): snake_case__ = "rag" snake_case__ = True def __init__( self : Dict , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : str=True , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Dict=None , UpperCAmelCase : str=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : str=" / " , UpperCAmelCase : Optional[int]=" // " , UpperCAmelCase : List[str]=5 , UpperCAmelCase : Union[str, Any]=300 , UpperCAmelCase : int=768 , UpperCAmelCase : Any=8 , UpperCAmelCase : Any="wiki_dpr" , UpperCAmelCase : Any="train" , UpperCAmelCase : Union[str, Any]="compressed" , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : str=None , UpperCAmelCase : List[str]=False , UpperCAmelCase : List[str]=False , UpperCAmelCase : Optional[Any]=0.0 , UpperCAmelCase : int=True , UpperCAmelCase : str=False , UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : Dict=False , UpperCAmelCase : str=True , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : str , ): super().__init__( bos_token_id=UpperCAmelCase , pad_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , decoder_start_token_id=UpperCAmelCase , forced_eos_token_id=UpperCAmelCase , is_encoder_decoder=UpperCAmelCase , prefix=UpperCAmelCase , vocab_size=UpperCAmelCase , **UpperCAmelCase , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" __lowerCamelCase : Dict = kwargs.pop("question_encoder" ) __lowerCamelCase : str = question_encoder_config.pop("model_type" ) __lowerCamelCase : List[Any] = kwargs.pop("generator" ) __lowerCamelCase : Tuple = decoder_config.pop("model_type" ) from ..auto.configuration_auto import AutoConfig __lowerCamelCase : Optional[int] = AutoConfig.for_model(UpperCAmelCase , **UpperCAmelCase ) __lowerCamelCase : Tuple = AutoConfig.for_model(UpperCAmelCase , **UpperCAmelCase ) __lowerCamelCase : Dict = reduce_loss __lowerCamelCase : Optional[Any] = label_smoothing __lowerCamelCase : List[Any] = exclude_bos_score __lowerCamelCase : List[str] = do_marginalize __lowerCamelCase : str = title_sep __lowerCamelCase : Optional[Any] = doc_sep __lowerCamelCase : List[Any] = n_docs __lowerCamelCase : List[str] = max_combined_length __lowerCamelCase : int = dataset __lowerCamelCase : Any = dataset_split __lowerCamelCase : str = index_name __lowerCamelCase : int = retrieval_vector_size __lowerCamelCase : Union[str, Any] = retrieval_batch_size __lowerCamelCase : Dict = passages_path __lowerCamelCase : int = index_path __lowerCamelCase : List[str] = use_dummy_dataset __lowerCamelCase : int = output_retrieved __lowerCamelCase : List[str] = do_deduplication __lowerCamelCase : Tuple = use_cache if self.forced_eos_token_id is None: __lowerCamelCase : Tuple = getattr(self.generator , "forced_eos_token_id" , UpperCAmelCase ) @classmethod def lowerCamelCase__ ( cls : str , UpperCAmelCase : PretrainedConfig , UpperCAmelCase : PretrainedConfig , **UpperCAmelCase : List[Any] ): return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **UpperCAmelCase ) def lowerCamelCase__ ( self : List[Any] ): __lowerCamelCase : Any = copy.deepcopy(self.__dict__ ) __lowerCamelCase : Tuple = self.question_encoder.to_dict() __lowerCamelCase : List[Any] = self.generator.to_dict() __lowerCamelCase : Optional[Any] = self.__class__.model_type return output
64
0
from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = '' SCREAMING_SNAKE_CASE__ = 'hf-legacy' # "hf://"" is reserved for hffs def __init__( self , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ): super().__init__(self , **_lowerCamelCase ) a :Union[str, Any] = repo_info a :int = token a :int = None def SCREAMING_SNAKE_CASE__ ( self ): if self.dir_cache is None: a :Dict = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes a :List[Any] = { '''name''': hf_file.rfilename, '''size''': None, '''type''': '''file''', } self.dir_cache.update( { str(_lowerCamelCase ): {'''name''': str(_lowerCamelCase ), '''size''': None, '''type''': '''directory'''} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = "rb" , **_lowerCamelCase , ): if not isinstance(self.repo_info , _lowerCamelCase ): raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' ) a :Optional[int] = hf_hub_url(self.repo_info.id , _lowerCamelCase , revision=self.repo_info.sha ) return fsspec.open( _lowerCamelCase , mode=_lowerCamelCase , headers=get_authentication_headers_for_url(_lowerCamelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open() def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , **_lowerCamelCase ): self._get_dirs() a :Union[str, Any] = self._strip_protocol(_lowerCamelCase ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase=False , **_lowerCamelCase ): self._get_dirs() a :str = PurePosixPath(path.strip('''/''' ) ) a :Tuple = {} for p, f in self.dir_cache.items(): a :Optional[int] = PurePosixPath(p.strip('''/''' ) ) a :str = p.parent if root == path: a :List[str] = f a :Any = list(paths.values() ) if detail: return out else: return sorted(f['''name'''] for f in out )
94
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging snake_case : Dict = logging.get_logger(__name__) snake_case : Tuple = '''▁''' snake_case : Any = {'''vocab_file''': '''sentencepiece.bpe.model'''} snake_case : Tuple = { '''vocab_file''': { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model''' ), } } snake_case : int = { '''xlm-roberta-base''': 5_12, '''xlm-roberta-large''': 5_12, '''xlm-roberta-large-finetuned-conll02-dutch''': 5_12, '''xlm-roberta-large-finetuned-conll02-spanish''': 5_12, '''xlm-roberta-large-finetuned-conll03-english''': 5_12, '''xlm-roberta-large-finetuned-conll03-german''': 5_12, } class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = ['input_ids', 'attention_mask'] def __init__( self , _lowerCamelCase , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase = None , **_lowerCamelCase , ): # Mask token behave like a normal word, i.e. include the space before it a :Optional[int] = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token a :int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , ) a :Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_lowerCamelCase ) ) a :str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token a :Tuple = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab a :List[str] = 1 a :Dict = len(self.sp_model ) + self.fairseq_offset a :List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ): a :List[str] = self.__dict__.copy() a :Optional[int] = None a :int = self.sp_model.serialized_model_proto() return state def __setstate__( self , _lowerCamelCase ): a :Union[str, Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): a :Union[str, Any] = {} a :Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] a :List[Any] = [self.cls_token_id] a :Dict = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1] def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ): a :int = [self.sep_token_id] a :int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def SCREAMING_SNAKE_CASE__ ( self ): return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def SCREAMING_SNAKE_CASE__ ( self ): a :Any = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] a :Optional[Any] = self.sp_model.PieceToId(_lowerCamelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :Tuple = ''''''.join(_lowerCamelCase ).replace(_lowerCamelCase , ''' ''' ).strip() return out_string def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ): if not os.path.isdir(_lowerCamelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return a :int = os.path.join( _lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCamelCase , '''wb''' ) as fi: a :List[Any] = self.sp_model.serialized_model_proto() fi.write(_lowerCamelCase ) return (out_vocab_file,)
94
1
"""simple docstring""" from __future__ import annotations from typing import Any def __UpperCAmelCase ( lowercase ): """simple docstring""" create_state_space_tree(a_ ,[] ,0 ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" if index == len(a_ ): print(a_ ) return create_state_space_tree(a_ ,a_ ,index + 1 ) current_subsequence.append(sequence[index] ) create_state_space_tree(a_ ,a_ ,index + 1 ) current_subsequence.pop() if __name__ == "__main__": UpperCAmelCase__ = [3, 1, 2, 4] generate_all_subsequences(seq) seq.clear() seq.extend(["""A""", """B""", """C"""]) generate_all_subsequences(seq)
364
"""simple docstring""" from itertools import product def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = sides_number _UpperCAmelCase = max_face_number * dice_number _UpperCAmelCase = [0] * (max_total + 1) _UpperCAmelCase = 1 _UpperCAmelCase = range(lowercase ,max_face_number + 1 ) for dice_numbers in product(lowercase ,repeat=lowercase ): _UpperCAmelCase = sum(lowercase ) totals_frequencies[total] += 1 return totals_frequencies def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = total_frequency_distribution( sides_number=4 ,dice_number=9 ) _UpperCAmelCase = total_frequency_distribution( sides_number=6 ,dice_number=6 ) _UpperCAmelCase = 0 _UpperCAmelCase = 9 _UpperCAmelCase = 4 * 9 _UpperCAmelCase = 6 for peter_total in range(lowercase ,max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) _UpperCAmelCase = (4**9) * (6**6) _UpperCAmelCase = peter_wins_count / total_games_number _UpperCAmelCase = round(lowercase ,ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(F'''{solution() = }''')
30
0
"""simple docstring""" import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : Tuple = f'''{sampling_rate}''' _lowerCamelCase : str = '1' _lowerCamelCase : str = 'f32le' _lowerCamelCase : Union[str, Any] = [ 'ffmpeg', '-i', 'pipe:0', '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] try: with subprocess.Popen(lowercase__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: _lowerCamelCase : str = ffmpeg_process.communicate(lowercase__ ) except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error _lowerCamelCase : List[Any] = output_stream[0] _lowerCamelCase : Tuple = np.frombuffer(lowercase__ , np.floataa ) if audio.shape[0] == 0: raise ValueError('Malformed soundfile' ) return audio def _snake_case ( lowercase__ , lowercase__ , lowercase__ = "f32le" , ): _lowerCamelCase : Optional[Any] = f'''{sampling_rate}''' _lowerCamelCase : List[str] = '1' if format_for_conversion == "s16le": _lowerCamelCase : List[str] = 2 elif format_for_conversion == "f32le": _lowerCamelCase : List[Any] = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) _lowerCamelCase : Dict = platform.system() if system == "Linux": _lowerCamelCase : Optional[int] = 'alsa' _lowerCamelCase : Optional[Any] = 'default' elif system == "Darwin": _lowerCamelCase : Optional[int] = 'avfoundation' _lowerCamelCase : Any = ':0' elif system == "Windows": _lowerCamelCase : Tuple = 'dshow' _lowerCamelCase : Tuple = 'default' _lowerCamelCase : Optional[int] = [ 'ffmpeg', '-f', format_, '-i', input_, '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-fflags', 'nobuffer', '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] _lowerCamelCase : Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample _lowerCamelCase : List[Any] = _ffmpeg_stream(lowercase__ , lowercase__ ) for item in iterator: yield item def _snake_case ( lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = "f32le" , ): if stream_chunk_s is not None: _lowerCamelCase : int = stream_chunk_s else: _lowerCamelCase : Optional[Any] = chunk_length_s _lowerCamelCase : Optional[Any] = ffmpeg_microphone(lowercase__ , lowercase__ , format_for_conversion=lowercase__ ) if format_for_conversion == "s16le": _lowerCamelCase : List[str] = np.intaa _lowerCamelCase : str = 2 elif format_for_conversion == "f32le": _lowerCamelCase : Any = np.floataa _lowerCamelCase : List[Any] = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) if stride_length_s is None: _lowerCamelCase : Union[str, Any] = chunk_length_s / 6 _lowerCamelCase : Optional[int] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowercase__ , (int, float) ): _lowerCamelCase : Any = [stride_length_s, stride_length_s] _lowerCamelCase : Tuple = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample _lowerCamelCase : Optional[Any] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample _lowerCamelCase : List[Any] = datetime.datetime.now() _lowerCamelCase : Optional[int] = datetime.timedelta(seconds=lowercase__ ) for item in chunk_bytes_iter(lowercase__ , lowercase__ , stride=(stride_left, stride_right) , stream=lowercase__ ): # Put everything back in numpy scale _lowerCamelCase : List[Any] = np.frombuffer(item['raw'] , dtype=lowercase__ ) _lowerCamelCase : int = ( item['stride'][0] // size_of_sample, item['stride'][1] // size_of_sample, ) _lowerCamelCase : Optional[int] = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = False ): _lowerCamelCase : int = B'' _lowerCamelCase, _lowerCamelCase : Dict = stride if stride_left + stride_right >= chunk_len: raise ValueError( f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) _lowerCamelCase : str = 0 for raw in iterator: acc += raw if stream and len(lowercase__ ) < chunk_len: _lowerCamelCase : Optional[int] = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowercase__ ) >= chunk_len: # We are flushing the accumulator _lowerCamelCase : str = (_stride_left, stride_right) _lowerCamelCase : str = {'raw': acc[:chunk_len], 'stride': stride} if stream: _lowerCamelCase : List[Any] = False yield item _lowerCamelCase : Optional[Any] = stride_left _lowerCamelCase : str = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowercase__ ) > stride_left: _lowerCamelCase : Optional[Any] = {'raw': acc, 'stride': (_stride_left, 0)} if stream: _lowerCamelCase : Tuple = False yield item def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : int = 2**24 # 16Mo try: with subprocess.Popen(lowercase__ , stdout=subprocess.PIPE , bufsize=lowercase__ ) as ffmpeg_process: while True: _lowerCamelCase : Optional[Any] = ffmpeg_process.stdout.read(lowercase__ ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
96
"""simple docstring""" import functools from typing import Any def _snake_case ( lowercase__ , lowercase__ ): # Validation if not isinstance(lowercase__ , lowercase__ ) or len(lowercase__ ) == 0: raise ValueError('the string should be not empty string' ) if not isinstance(lowercase__ , lowercase__ ) or not all( isinstance(lowercase__ , lowercase__ ) and len(lowercase__ ) > 0 for item in words ): raise ValueError('the words should be a list of non-empty strings' ) # Build trie _lowerCamelCase : dict[str, Any] = {} _lowerCamelCase : List[Any] = 'WORD_KEEPER' for word in words: _lowerCamelCase : Dict = trie for c in word: if c not in trie_node: _lowerCamelCase : Any = {} _lowerCamelCase : str = trie_node[c] _lowerCamelCase : Optional[Any] = True _lowerCamelCase : Dict = len(lowercase__ ) # Dynamic programming method @functools.cache def is_breakable(lowercase__ ) -> bool: if index == len_string: return True _lowerCamelCase : List[Any] = trie for i in range(lowercase__ , lowercase__ ): _lowerCamelCase : Any = trie_node.get(string[i] , lowercase__ ) if trie_node is None: return False if trie_node.get(lowercase__ , lowercase__ ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
96
1
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch _UpperCAmelCase = logging.get_logger(__name__) class snake_case_ : def __init__( self : Optional[Any] , _snake_case : str = None , _snake_case : uuid.UUID = None , _snake_case : List[str]=None , _snake_case : Tuple=None )->Optional[Any]: '''simple docstring''' if not conversation_id: __lowerCAmelCase : Any = uuid.uuida() if past_user_inputs is None: __lowerCAmelCase : Optional[Any] = [] if generated_responses is None: __lowerCAmelCase : Optional[Any] = [] __lowerCAmelCase : uuid.UUID = conversation_id __lowerCAmelCase : List[str] = past_user_inputs __lowerCAmelCase : List[str] = generated_responses __lowerCAmelCase : Optional[str] = text def __eq__( self : str , _snake_case : Dict )->Optional[Any]: '''simple docstring''' if not isinstance(_snake_case , _snake_case ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def UpperCAmelCase__ ( self : Optional[Any] , _snake_case : str , _snake_case : bool = False )->Any: '''simple docstring''' if self.new_user_input: if overwrite: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ''' F'''with: "{text}".''' ) __lowerCAmelCase : int = text else: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input ''' F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' ) else: __lowerCAmelCase : List[str] = text def UpperCAmelCase__ ( self : List[str] )->int: '''simple docstring''' if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) __lowerCAmelCase : List[str] = None def UpperCAmelCase__ ( self : List[str] , _snake_case : str )->List[str]: '''simple docstring''' self.generated_responses.append(_snake_case ) def UpperCAmelCase__ ( self : str )->Optional[Any]: '''simple docstring''' for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self : Dict )->Optional[int]: '''simple docstring''' __lowerCAmelCase : List[Any] = F'''Conversation id: {self.uuid} \n''' for is_user, text in self.iter_texts(): __lowerCAmelCase : str = """user""" if is_user else """bot""" output += F'''{name} >> {text} \n''' return output @add_end_docstrings( __lowercase ,R'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' ,) class snake_case_ ( __lowercase ): def __init__( self : List[Any] , *_snake_case : str , **_snake_case : List[Any] )->List[Any]: '''simple docstring''' super().__init__(*_snake_case , **_snake_case ) if self.tokenizer.pad_token_id is None: __lowerCAmelCase : Optional[int] = self.tokenizer.eos_token def UpperCAmelCase__ ( self : Dict , _snake_case : Any=None , _snake_case : str=None , _snake_case : Tuple=None , **_snake_case : str )->str: '''simple docstring''' __lowerCAmelCase : Optional[Any] = {} __lowerCAmelCase : Tuple = {} __lowerCAmelCase : List[str] = {} if min_length_for_response is not None: __lowerCAmelCase : Optional[int] = min_length_for_response if minimum_tokens is not None: __lowerCAmelCase : Tuple = minimum_tokens if "max_length" in generate_kwargs: __lowerCAmelCase : List[str] = generate_kwargs["""max_length"""] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: __lowerCAmelCase : int = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(_snake_case ) return preprocess_params, forward_params, postprocess_params def __call__( self : int , _snake_case : Union[Conversation, List[Conversation]] , _snake_case : List[str]=0 , **_snake_case : Optional[Any] )->Optional[int]: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = super().__call__(_snake_case , num_workers=_snake_case , **_snake_case ) if isinstance(_snake_case , _snake_case ) and len(_snake_case ) == 1: return outputs[0] return outputs def UpperCAmelCase__ ( self : str , _snake_case : Conversation , _snake_case : Tuple=32 )->Dict[str, Any]: '''simple docstring''' if not isinstance(_snake_case , _snake_case ): raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" ) if conversation.new_user_input is None: raise ValueError( F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ''' """Add user inputs with the conversation's `add_user_input` method""" ) if hasattr(self.tokenizer , """_build_conversation_input_ids""" ): __lowerCAmelCase : str = self.tokenizer._build_conversation_input_ids(_snake_case ) else: # If the tokenizer cannot handle conversations, we default to only the old version __lowerCAmelCase : Tuple = self._legacy_parse_and_tokenize(_snake_case ) if self.framework == "pt": __lowerCAmelCase : int = torch.LongTensor([input_ids] ) elif self.framework == "tf": __lowerCAmelCase : Dict = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def UpperCAmelCase__ ( self : List[Any] , _snake_case : List[str] , _snake_case : Optional[int]=10 , **_snake_case : int )->Dict: '''simple docstring''' __lowerCAmelCase : int = generate_kwargs.get("""max_length""" , self.model.config.max_length ) __lowerCAmelCase : Tuple = model_inputs["""input_ids"""].shape[1] if max_length - minimum_tokens < n: logger.warning(F'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' ) __lowerCAmelCase : Dict = max_length - minimum_tokens __lowerCAmelCase : Dict = model_inputs["""input_ids"""][:, -trim:] if "attention_mask" in model_inputs: __lowerCAmelCase : List[str] = model_inputs["""attention_mask"""][:, -trim:] __lowerCAmelCase : Union[str, Any] = model_inputs.pop("""conversation""" ) __lowerCAmelCase : Any = max_length __lowerCAmelCase : Union[str, Any] = self.model.generate(**_snake_case , **_snake_case ) if self.model.config.is_encoder_decoder: __lowerCAmelCase : str = 1 else: __lowerCAmelCase : Optional[int] = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def UpperCAmelCase__ ( self : str , _snake_case : Any , _snake_case : Optional[Any]=True )->Dict: '''simple docstring''' __lowerCAmelCase : str = model_outputs["""output_ids"""] __lowerCAmelCase : str = self.tokenizer.decode( output_ids[0] , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case , ) __lowerCAmelCase : Union[str, Any] = model_outputs["""conversation"""] conversation.mark_processed() conversation.append_response(_snake_case ) return conversation def UpperCAmelCase__ ( self : Union[str, Any] , _snake_case : Conversation )->Dict: '''simple docstring''' __lowerCAmelCase : Optional[Any] = self.tokenizer.eos_token_id __lowerCAmelCase : Tuple = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) ) if len(_snake_case ) > self.tokenizer.model_max_length: __lowerCAmelCase : Optional[Any] = input_ids[-self.tokenizer.model_max_length :] return input_ids
350
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = { 'microsoft/unispeech-sat-base-100h-libri-ft': ( 'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json' ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class snake_case_ ( __lowercase ): A_ = 'unispeech-sat' def __init__( self : str , _snake_case : List[Any]=32 , _snake_case : Union[str, Any]=768 , _snake_case : Tuple=12 , _snake_case : Optional[int]=12 , _snake_case : Optional[Any]=3072 , _snake_case : Tuple="gelu" , _snake_case : int=0.1 , _snake_case : List[Any]=0.1 , _snake_case : Union[str, Any]=0.1 , _snake_case : str=0.0 , _snake_case : List[str]=0.0 , _snake_case : int=0.1 , _snake_case : Optional[Any]=0.1 , _snake_case : Optional[Any]=0.02 , _snake_case : int=1E-5 , _snake_case : Dict="group" , _snake_case : Optional[Any]="gelu" , _snake_case : Optional[Any]=(512, 512, 512, 512, 512, 512, 512) , _snake_case : int=(5, 2, 2, 2, 2, 2, 2) , _snake_case : int=(10, 3, 3, 3, 3, 2, 2) , _snake_case : Any=False , _snake_case : Optional[Any]=128 , _snake_case : Tuple=16 , _snake_case : str=False , _snake_case : Dict=True , _snake_case : Tuple=0.05 , _snake_case : str=10 , _snake_case : Tuple=2 , _snake_case : List[Any]=0.0 , _snake_case : str=10 , _snake_case : Any=0 , _snake_case : List[Any]=320 , _snake_case : Union[str, Any]=2 , _snake_case : Dict=0.1 , _snake_case : Dict=100 , _snake_case : Union[str, Any]=256 , _snake_case : int=256 , _snake_case : Union[str, Any]=0.1 , _snake_case : Optional[Any]="mean" , _snake_case : int=False , _snake_case : str=False , _snake_case : str=256 , _snake_case : List[Any]=(512, 512, 512, 512, 1500) , _snake_case : Optional[int]=(5, 3, 3, 1, 1) , _snake_case : Tuple=(1, 2, 3, 1, 1) , _snake_case : Dict=512 , _snake_case : Union[str, Any]=0 , _snake_case : List[str]=1 , _snake_case : Optional[Any]=2 , _snake_case : Optional[int]=504 , **_snake_case : Optional[int] , )->Union[str, Any]: '''simple docstring''' super().__init__(**_snake_case , pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case ) __lowerCAmelCase : Dict = hidden_size __lowerCAmelCase : List[Any] = feat_extract_norm __lowerCAmelCase : int = feat_extract_activation __lowerCAmelCase : Union[str, Any] = list(_snake_case ) __lowerCAmelCase : str = list(_snake_case ) __lowerCAmelCase : Optional[Any] = list(_snake_case ) __lowerCAmelCase : Optional[int] = conv_bias __lowerCAmelCase : Dict = num_conv_pos_embeddings __lowerCAmelCase : List[Any] = num_conv_pos_embedding_groups __lowerCAmelCase : Tuple = len(self.conv_dim ) __lowerCAmelCase : int = num_hidden_layers __lowerCAmelCase : str = intermediate_size __lowerCAmelCase : str = hidden_act __lowerCAmelCase : Any = num_attention_heads __lowerCAmelCase : Optional[int] = hidden_dropout __lowerCAmelCase : str = attention_dropout __lowerCAmelCase : int = activation_dropout __lowerCAmelCase : Union[str, Any] = feat_proj_dropout __lowerCAmelCase : List[str] = final_dropout __lowerCAmelCase : Dict = layerdrop __lowerCAmelCase : Tuple = layer_norm_eps __lowerCAmelCase : Optional[Any] = initializer_range __lowerCAmelCase : str = vocab_size __lowerCAmelCase : Optional[int] = num_clusters __lowerCAmelCase : List[Any] = do_stable_layer_norm __lowerCAmelCase : Tuple = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==""" """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =""" F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowerCAmelCase : Dict = apply_spec_augment __lowerCAmelCase : List[Any] = mask_time_prob __lowerCAmelCase : List[str] = mask_time_length __lowerCAmelCase : Dict = mask_time_min_masks __lowerCAmelCase : Tuple = mask_feature_prob __lowerCAmelCase : List[str] = mask_feature_length __lowerCAmelCase : str = mask_feature_min_masks # parameters for pretraining with codevector quantized representations __lowerCAmelCase : Optional[int] = num_codevectors_per_group __lowerCAmelCase : List[Any] = num_codevector_groups __lowerCAmelCase : int = contrastive_logits_temperature __lowerCAmelCase : str = feat_quantizer_dropout __lowerCAmelCase : int = num_negatives __lowerCAmelCase : str = codevector_dim __lowerCAmelCase : Any = proj_codevector_dim __lowerCAmelCase : Any = diversity_loss_weight # ctc loss __lowerCAmelCase : Tuple = ctc_loss_reduction __lowerCAmelCase : Any = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. __lowerCAmelCase : Any = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. __lowerCAmelCase : List[str] = list(_snake_case ) __lowerCAmelCase : List[str] = list(_snake_case ) __lowerCAmelCase : Optional[int] = list(_snake_case ) __lowerCAmelCase : Optional[int] = xvector_output_dim @property def UpperCAmelCase__ ( self : Optional[Any] )->Any: '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
232
0
import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ): '''simple docstring''' _UpperCAmelCase : List[Any] = CanineTokenizer _UpperCAmelCase : Dict = False def A ( self : Tuple ): '''simple docstring''' super().setUp() _snake_case = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def A ( self : Optional[Any] ): '''simple docstring''' return CanineTokenizer.from_pretrained('google/canine-s' ) def A ( self : List[str] , **lowercase : Union[str, Any] ): '''simple docstring''' _snake_case = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase ) _snake_case = 1_024 return tokenizer @require_torch def A ( self : Dict ): '''simple docstring''' _snake_case = self.canine_tokenizer _snake_case = ['Life is like a box of chocolates.', 'You never know what you\'re gonna get.'] # fmt: off _snake_case = [57_344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57_345, 0, 0, 0, 0] # fmt: on _snake_case = tokenizer(lowercase , padding=lowercase , return_tensors='pt' ) self.assertIsInstance(lowercase , lowercase ) _snake_case = list(batch.input_ids.numpy()[0] ) self.assertListEqual(lowercase , lowercase ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def A ( self : Optional[Any] ): '''simple docstring''' _snake_case = self.canine_tokenizer _snake_case = ['Once there was a man.', 'He wrote a test in HuggingFace Tranformers.'] _snake_case = tokenizer(lowercase , padding=lowercase , return_tensors='pt' ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn('input_ids' , lowercase ) self.assertIn('attention_mask' , lowercase ) self.assertIn('token_type_ids' , lowercase ) @require_torch def A ( self : int ): '''simple docstring''' _snake_case = self.canine_tokenizer _snake_case = [ 'What\'s the weater?', 'It\'s about 25 degrees.', ] _snake_case = tokenizer( text_target=lowercase , max_length=32 , padding='max_length' , truncation=lowercase , return_tensors='pt' ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def A ( self : Tuple ): '''simple docstring''' _snake_case = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test _snake_case = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc _snake_case = tempfile.mkdtemp() _snake_case = ' He is very happy, UNwant\u00E9d,running' _snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase ) tokenizer.save_pretrained(lowercase ) _snake_case = tokenizer.__class__.from_pretrained(lowercase ) _snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) shutil.rmtree(lowercase ) _snake_case = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc _snake_case = tempfile.mkdtemp() _snake_case = ' He is very happy, UNwant\u00E9d,running' _snake_case = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: _snake_case = chr(0xE007 ) additional_special_tokens.append(lowercase ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) _snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase ) tokenizer.save_pretrained(lowercase ) _snake_case = tokenizer.__class__.from_pretrained(lowercase ) _snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) self.assertIn(lowercase , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) _snake_case = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(lowercase ) def A ( self : int ): '''simple docstring''' _snake_case = self.get_tokenizers(do_lower_case=lowercase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): _snake_case , _snake_case = self.get_clean_sequence(lowercase ) # a special token for Canine can be defined as follows: _snake_case = 0xE005 _snake_case = chr(lowercase ) tokenizer.add_special_tokens({'cls_token': special_token} ) _snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertEqual(len(lowercase ) , 1 ) _snake_case = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowercase ) _snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase ) _snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase ) _snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertEqual(lowercase , input_encoded + special_token_id ) _snake_case = tokenizer.decode(lowercase , skip_special_tokens=lowercase ) self.assertTrue(special_token not in decoded ) def A ( self : Optional[int] ): '''simple docstring''' _snake_case = self.get_tokenizers(do_lower_case=lowercase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): _snake_case = chr(0xE005 ) _snake_case = chr(0xE006 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowercase ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py) tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]} ) _snake_case = tokenizer.tokenize(lowercase ) _snake_case = tokenizer.tokenize(lowercase ) self.assertEqual(len(lowercase ) , 1 ) self.assertEqual(len(lowercase ) , 1 ) self.assertEqual(token_a[0] , lowercase ) self.assertEqual(token_a[0] , lowercase ) @require_tokenizers def A ( self : Dict ): '''simple docstring''' _snake_case = self.get_tokenizers(do_lower_case=lowercase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # a special token for Canine can be defined as follows: _snake_case = 0xE006 _snake_case = chr(lowercase ) _snake_case = AddedToken(lowercase , lstrip=lowercase ) tokenizer.add_special_tokens({'additional_special_tokens': [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(lowercase ) tokenizer.from_pretrained(lowercase ) def A ( self : Tuple ): '''simple docstring''' _snake_case = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(lowercase ) with open(os.path.join(lowercase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: _snake_case = json.load(lowercase ) with open(os.path.join(lowercase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: _snake_case = json.load(lowercase ) # a special token for Canine can be defined as follows: _snake_case = 0xE006 _snake_case = chr(lowercase ) _snake_case = [new_token_a] _snake_case = [new_token_a] with open(os.path.join(lowercase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(lowercase , lowercase ) with open(os.path.join(lowercase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(lowercase , lowercase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _snake_case = tokenizer_class.from_pretrained(lowercase , extra_ids=0 ) self.assertIn(lowercase , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) _snake_case = 0xE007 _snake_case = chr(lowercase ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _snake_case = [AddedToken(lowercase , lstrip=lowercase )] _snake_case = tokenizer_class.from_pretrained( lowercase , additional_special_tokens=lowercase , extra_ids=0 ) self.assertIn(lowercase , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def A ( self : Tuple ): '''simple docstring''' _snake_case = self.get_tokenizers(do_lower_case=lowercase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): _snake_case = 'hello world' if self.space_between_special_tokens: _snake_case = '[CLS] hello world [SEP]' else: _snake_case = input _snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase ) _snake_case = tokenizer.decode(lowercase , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(lowercase , [output, output.lower()] ) def A ( self : List[Any] ): '''simple docstring''' _snake_case = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): _snake_case = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] _snake_case = 'a' _snake_case = ord(lowercase ) for attr in attributes_list: setattr(lowercase , attr + '_id' , lowercase ) self.assertEqual(getattr(lowercase , lowercase ) , lowercase ) self.assertEqual(getattr(lowercase , attr + '_id' ) , lowercase ) setattr(lowercase , attr + '_id' , lowercase ) self.assertEqual(getattr(lowercase , lowercase ) , lowercase ) self.assertEqual(getattr(lowercase , attr + '_id' ) , lowercase ) setattr(lowercase , 'additional_special_tokens_ids' , [] ) self.assertListEqual(getattr(lowercase , 'additional_special_tokens' ) , [] ) self.assertListEqual(getattr(lowercase , 'additional_special_tokens_ids' ) , [] ) _snake_case = 0xE006 _snake_case = chr(lowercase ) setattr(lowercase , 'additional_special_tokens_ids' , [additional_special_token_id] ) self.assertListEqual(getattr(lowercase , 'additional_special_tokens' ) , [additional_special_token] ) self.assertListEqual(getattr(lowercase , 'additional_special_tokens_ids' ) , [additional_special_token_id] ) def A ( self : str ): '''simple docstring''' pass def A ( self : Optional[int] ): '''simple docstring''' pass def A ( self : Optional[int] ): '''simple docstring''' pass def A ( self : Optional[int] ): '''simple docstring''' pass def A ( self : Optional[Any] ): '''simple docstring''' pass def A ( self : Optional[int] ): '''simple docstring''' pass def A ( self : Dict ): '''simple docstring''' pass def A ( self : Tuple ): '''simple docstring''' pass
282
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' def A ( self : List[Any] , lowercase : Dict ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ): _snake_case = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(lowercase ) def A ( self : str ): '''simple docstring''' _snake_case = 'sshleifer/tiny-gpt2' _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , ) _snake_case = PyTorchBenchmark(lowercase ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self : Any ): '''simple docstring''' _snake_case = 'sgugger/tiny-distilbert-classification' _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , only_pretrain_model=lowercase , ) _snake_case = PyTorchBenchmark(lowercase ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self : Optional[int] ): '''simple docstring''' _snake_case = 'sshleifer/tiny-gpt2' _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase , inference=lowercase , torchscript=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , ) _snake_case = PyTorchBenchmark(lowercase ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' ) def A ( self : Optional[Any] ): '''simple docstring''' _snake_case = 'sshleifer/tiny-gpt2' _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase , inference=lowercase , fpaa=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , ) _snake_case = PyTorchBenchmark(lowercase ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self : str ): '''simple docstring''' _snake_case = 'sshleifer/tiny-gpt2' _snake_case = AutoConfig.from_pretrained(lowercase ) # set architectures equal to `None` _snake_case = None _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , ) _snake_case = PyTorchBenchmark(lowercase , configs=[config] ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self : Optional[Any] ): '''simple docstring''' _snake_case = 'sshleifer/tiny-gpt2' _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , ) _snake_case = PyTorchBenchmark(lowercase ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' ) def A ( self : str ): '''simple docstring''' _snake_case = 'sshleifer/tiny-gpt2' _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowercase , multi_process=lowercase , ) _snake_case = PyTorchBenchmark(lowercase ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def A ( self : Tuple ): '''simple docstring''' _snake_case = 'sshleifer/tiny-gpt2' _snake_case = AutoConfig.from_pretrained(lowercase ) _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , ) _snake_case = PyTorchBenchmark(lowercase , configs=[config] ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self : Union[str, Any] ): '''simple docstring''' _snake_case = 'sshleifer/tinier_bart' _snake_case = AutoConfig.from_pretrained(lowercase ) _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , ) _snake_case = PyTorchBenchmark(lowercase , configs=[config] ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self : Dict ): '''simple docstring''' _snake_case = 'sshleifer/tiny-gpt2' _snake_case = AutoConfig.from_pretrained(lowercase ) _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , ) _snake_case = PyTorchBenchmark(lowercase , configs=[config] ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def A ( self : Dict ): '''simple docstring''' _snake_case = 'sshleifer/tinier_bart' _snake_case = AutoConfig.from_pretrained(lowercase ) _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , ) _snake_case = PyTorchBenchmark(lowercase , configs=[config] ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def A ( self : Optional[Any] ): '''simple docstring''' _snake_case = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase , inference=lowercase , save_to_csv=lowercase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowercase , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(lowercase , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(lowercase , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(lowercase , 'train_time.csv' ) , env_info_csv_file=os.path.join(lowercase , 'env.csv' ) , multi_process=lowercase , ) _snake_case = PyTorchBenchmark(lowercase ) benchmark.run() self.assertTrue(Path(os.path.join(lowercase , 'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase , 'train_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase , 'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase , 'train_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase , 'env.csv' ) ).exists() ) def A ( self : Union[str, Any] ): '''simple docstring''' _snake_case = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(lowercase : Optional[Any] ): self.assertTrue(hasattr(lowercase , 'sequential' ) ) self.assertTrue(hasattr(lowercase , 'cumulative' ) ) self.assertTrue(hasattr(lowercase , 'current' ) ) self.assertTrue(hasattr(lowercase , 'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowercase , 'log.txt' ) , log_print=lowercase , trace_memory_line_by_line=lowercase , multi_process=lowercase , ) _snake_case = PyTorchBenchmark(lowercase ) _snake_case = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(lowercase , 'log.txt' ) ).exists() )
282
1
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() A = logging.get_logger(__name__) A = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k", "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v", "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q", "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u", "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v", "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out", "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos", "self_attn.rotary_emb": "encoder.embed_positions", "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm", "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1", "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2", "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv", "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm", "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm", "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense", "ffn1.w_2": "encoder.layers.*.ffn1.output_dense", "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm", "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense", "ffn2.w_2": "encoder.layers.*.ffn2.output_dense", "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } A = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def __A ( a_ :Union[str, Any] , a_ :str , a_ :Tuple , a_ :Any , a_ :Union[str, Any]) -> List[Any]: for attribute in key.split('''.'''): __a : Optional[int] = getattr(A__ , A__) if weight_type is not None: __a : Union[str, Any] = getattr(A__ , A__).shape else: __a : Any = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""") if weight_type == "weight": __a : Tuple = value elif weight_type == "weight_g": __a : Optional[int] = value elif weight_type == "weight_v": __a : Tuple = value elif weight_type == "bias": __a : List[str] = value elif weight_type == "running_mean": __a : str = value elif weight_type == "running_var": __a : List[str] = value elif weight_type == "num_batches_tracked": __a : Union[str, Any] = value elif weight_type == "inv_freq": __a : Optional[Any] = value else: __a : int = value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""") def __A ( a_ :Optional[Any] , a_ :Any , a_ :List[Any]) -> List[str]: __a : List[Any] = [] __a : Tuple = fairseq_model.state_dict() __a : Tuple = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): __a : int = False if "conv_layers" in name: load_conv_layer( A__ , A__ , A__ , A__ , hf_model.config.feat_extract_norm == '''group''' , ) __a : Optional[Any] = True else: for key, mapped_key in MAPPING.items(): __a : Dict = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]: __a : int = True if "*" in mapped_key: __a : Optional[int] = name.split(A__)[0].split('''.''')[-2] __a : List[str] = mapped_key.replace('''*''' , A__) if "pos_bias_u" in name: __a : List[str] = None elif "pos_bias_v" in name: __a : Any = None elif "weight_g" in name: __a : str = """weight_g""" elif "weight_v" in name: __a : Tuple = """weight_v""" elif "bias" in name: __a : Optional[Any] = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __a : Optional[Any] = """weight""" elif "running_mean" in name: __a : List[str] = """running_mean""" elif "inv_freq" in name: __a : Union[str, Any] = """inv_freq""" elif "running_var" in name: __a : Any = """running_var""" elif "num_batches_tracked" in name: __a : Tuple = """num_batches_tracked""" else: __a : Any = None set_recursively(A__ , A__ , A__ , A__ , A__) continue if not is_used: unused_weights.append(A__) logger.warning(F"""Unused weights: {unused_weights}""") def __A ( a_ :Tuple , a_ :Any , a_ :List[Any] , a_ :str , a_ :Tuple) -> List[Any]: __a : List[Any] = full_name.split('''conv_layers.''')[-1] __a : int = name.split('''.''') __a : Dict = int(items[0]) __a : Dict = int(items[1]) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""") __a : Optional[Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""") elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""") __a : Union[str, Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""") elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""") __a : Tuple = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""") elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""") __a : List[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""") else: unused_weights.append(A__) @torch.no_grad() def __A ( a_ :Dict , a_ :Dict , a_ :Dict=None , a_ :List[Any]=None , a_ :Any=True) -> List[str]: if config_path is not None: __a : List[str] = WavaVecaConformerConfig.from_pretrained(A__ , hidden_act='''swish''') else: __a : int = WavaVecaConformerConfig() if "rope" in checkpoint_path: __a : str = """rotary""" if is_finetuned: if dict_path: __a : Optional[Any] = Dictionary.load(A__) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __a : Optional[int] = target_dict.pad_index __a : List[str] = target_dict.bos_index __a : List[Any] = target_dict.eos_index __a : Optional[Any] = len(target_dict.symbols) __a : Optional[int] = os.path.join(A__ , '''vocab.json''') if not os.path.isdir(A__): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(A__)) return os.makedirs(A__ , exist_ok=A__) __a : Optional[Any] = target_dict.indices # fairseq has the <pad> and <s> switched __a : str = 0 __a : List[str] = 1 with open(A__ , '''w''' , encoding='''utf-8''') as vocab_handle: json.dump(A__ , A__) __a : List[str] = WavaVecaCTCTokenizer( A__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=A__ , ) __a : Optional[Any] = True if config.feat_extract_norm == """layer""" else False __a : str = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=A__ , return_attention_mask=A__ , ) __a : str = WavaVecaProcessor(feature_extractor=A__ , tokenizer=A__) processor.save_pretrained(A__) __a : Dict = WavaVecaConformerForCTC(A__) else: __a : List[Any] = WavaVecaConformerForPreTraining(A__) if is_finetuned: __a : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1])}) else: __a : Dict = argparse.Namespace(task='''audio_pretraining''') __a : str = fairseq.tasks.setup_task(A__) __a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A__) __a : int = model[0].eval() recursively_load_weights(A__ , A__ , not is_finetuned) hf_wavavec.save_pretrained(A__) if __name__ == "__main__": A = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) A = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
351
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A = logging.get_logger(__name__) A = {'''vocab_file''': '''spiece.model'''} A = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', } } A = { '''albert-base-v1''': 512, '''albert-large-v1''': 512, '''albert-xlarge-v1''': 512, '''albert-xxlarge-v1''': 512, '''albert-base-v2''': 512, '''albert-large-v2''': 512, '''albert-xlarge-v2''': 512, '''albert-xxlarge-v2''': 512, } A = '''▁''' class __lowercase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , _UpperCAmelCase , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[MASK]" , _UpperCAmelCase = None , **_UpperCAmelCase , ): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. __a : int = ( AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase , normalized=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token ) __a : Any = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , ) __a : Tuple = do_lower_case __a : Optional[Any] = remove_space __a : Optional[Any] = keep_accents __a : Union[str, Any] = vocab_file __a : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCAmelCase ) @property def _lowerCamelCase ( self ): return len(self.sp_model ) def _lowerCamelCase ( self ): __a : Any = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): __a : str = self.__dict__.copy() __a : Tuple = None return state def __setstate__( self , _UpperCAmelCase ): __a : Any = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __a : Optional[Any] = {} __a : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowerCamelCase ( self , _UpperCAmelCase ): if self.remove_space: __a : Any = ''' '''.join(inputs.strip().split() ) else: __a : Tuple = inputs __a : Union[str, Any] = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' ) if not self.keep_accents: __a : List[str] = unicodedata.normalize('''NFKD''' , _UpperCAmelCase ) __a : Optional[int] = ''''''.join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] ) if self.do_lower_case: __a : Optional[Any] = outputs.lower() return outputs def _lowerCamelCase ( self , _UpperCAmelCase ): __a : int = self.preprocess_text(_UpperCAmelCase ) __a : Tuple = self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase ) __a : int = [] for piece in pieces: if len(_UpperCAmelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit(): __a : List[str] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase , '''''' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: __a : Tuple = cur_pieces[1:] else: __a : Optional[Any] = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_UpperCAmelCase ) else: new_pieces.append(_UpperCAmelCase ) return new_pieces def _lowerCamelCase ( self , _UpperCAmelCase ): return self.sp_model.PieceToId(_UpperCAmelCase ) def _lowerCamelCase ( self , _UpperCAmelCase ): return self.sp_model.IdToPiece(_UpperCAmelCase ) def _lowerCamelCase ( self , _UpperCAmelCase ): __a : List[str] = [] __a : str = '''''' __a : Any = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_UpperCAmelCase ) + token __a : Tuple = True __a : Tuple = [] else: current_sub_tokens.append(_UpperCAmelCase ) __a : Optional[int] = False out_string += self.sp_model.decode(_UpperCAmelCase ) return out_string.strip() def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ): __a : int = [self.sep_token_id] __a : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is not None: return [1] + ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1] return [1] + ([0] * len(_UpperCAmelCase )) + [1] def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ): __a : Union[str, Any] = [self.sep_token_id] __a : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ): if not os.path.isdir(_UpperCAmelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __a : List[str] = os.path.join( _UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase , '''wb''' ) as fi: __a : Any = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (out_vocab_file,)
188
0
"""simple docstring""" import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput lowerCamelCase_ : Optional[Any] = """scheduler_config.json""" class __A ( __a ): """simple docstring""" __lowerCAmelCase = 1 __lowerCAmelCase = 2 __lowerCAmelCase = 3 __lowerCAmelCase = 4 __lowerCAmelCase = 5 __lowerCAmelCase = 6 __lowerCAmelCase = 7 __lowerCAmelCase = 8 __lowerCAmelCase = 9 __lowerCAmelCase = 10 __lowerCAmelCase = 11 __lowerCAmelCase = 12 __lowerCAmelCase = 13 __lowerCAmelCase = 14 @dataclass class __A ( __a ): """simple docstring""" __lowerCAmelCase = 42 class __A : """simple docstring""" __lowerCAmelCase = SCHEDULER_CONFIG_NAME __lowerCAmelCase = [] __lowerCAmelCase = True @classmethod def SCREAMING_SNAKE_CASE ( cls , __A = None , __A = None , __A=False , **__A , ) -> Union[str, Any]: a =cls.load_config( pretrained_model_name_or_path=a_ , subfolder=a_ , return_unused_kwargs=a_ , return_commit_hash=a_ , **a_ , ) return cls.from_config(a_ , return_unused_kwargs=a_ , **a_ ) def SCREAMING_SNAKE_CASE ( self , __A , __A = False , **__A ) -> Optional[int]: self.save_config(save_directory=a_ , push_to_hub=a_ , **a_ ) @property def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: return self._get_compatibles() @classmethod def SCREAMING_SNAKE_CASE ( cls ) -> Union[str, Any]: a =list(set([cls.__name__] + cls._compatibles ) ) a =importlib.import_module(__name__.split('''.''' )[0] ) a =[ getattr(a_ , a_ ) for c in compatible_classes_str if hasattr(a_ , a_ ) ] return compatible_classes
81
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## A_ = 16 A_ = 32 def UpperCAmelCase__ (snake_case__ : Accelerator , snake_case__ : int = 16 ): """simple docstring""" _snake_case : Optional[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _snake_case : Any = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(snake_case__ : Any ): # max_length=None => use the model max length (it's actually the default) _snake_case : Any = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _snake_case : List[Any] = datasets.map( snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _snake_case : int = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(snake_case__ : int ): # On TPU it's best to pad everything to the same length or training will be very slow. _snake_case : Optional[int] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _snake_case : str = 16 elif accelerator.mixed_precision != "no": _snake_case : Optional[int] = 8 else: _snake_case : Optional[int] = None return tokenizer.pad( snake_case__ , padding="""longest""" , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors="""pt""" , ) # Instantiate dataloaders. _snake_case : Optional[int] = DataLoader( tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) _snake_case : Dict = DataLoader( tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders A_ = mocked_dataloaders # noqa: F811 def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any ): """simple docstring""" if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , snake_case__ ) == "1": _snake_case : List[Any] = 2 # Initialize accelerator _snake_case : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _snake_case : Tuple = config["""lr"""] _snake_case : str = int(config["""num_epochs"""] ) _snake_case : Union[str, Any] = int(config["""seed"""] ) _snake_case : Union[str, Any] = int(config["""batch_size"""] ) _snake_case : List[str] = evaluate.load("""glue""" , """mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=snake_case__ ) def inner_training_loop(snake_case__ : Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(snake_case__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _snake_case : List[Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=snake_case__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _snake_case : Tuple = model.to(accelerator.device ) # Instantiate optimizer _snake_case : str = AdamW(params=model.parameters() , lr=snake_case__ ) _snake_case , _snake_case : Optional[int] = get_dataloaders(snake_case__ , snake_case__ ) # Instantiate scheduler _snake_case : str = get_linear_schedule_with_warmup( optimizer=snake_case__ , num_warmup_steps=1_00 , num_training_steps=(len(snake_case__ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : List[str] = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Now we train the model for epoch in range(snake_case__ ): model.train() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _snake_case : int = model(**snake_case__ ) _snake_case : str = outputs.loss accelerator.backward(snake_case__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _snake_case : int = model(**snake_case__ ) _snake_case : Optional[Any] = outputs.logits.argmax(dim=-1 ) _snake_case , _snake_case : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=snake_case__ , references=snake_case__ , ) _snake_case : str = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"epoch {epoch}:" , snake_case__ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def UpperCAmelCase__ (): """simple docstring""" _snake_case : Any = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=snake_case__ , default=snake_case__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) _snake_case : Dict = parser.parse_args() _snake_case : int = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
64
0
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : Any = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ): '''simple docstring''' _UpperCAmelCase : Dict = "encoder-decoder" _UpperCAmelCase : int = True def __init__( self : Union[str, Any] , **lowercase : Tuple ): '''simple docstring''' super().__init__(**lowercase ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" _snake_case = kwargs.pop('encoder' ) _snake_case = encoder_config.pop('model_type' ) _snake_case = kwargs.pop('decoder' ) _snake_case = decoder_config.pop('model_type' ) from ..auto.configuration_auto import AutoConfig _snake_case = AutoConfig.for_model(lowercase , **lowercase ) _snake_case = AutoConfig.for_model(lowercase , **lowercase ) _snake_case = True @classmethod def A ( cls : List[Any] , lowercase : PretrainedConfig , lowercase : PretrainedConfig , **lowercase : Union[str, Any] ): '''simple docstring''' logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' ) _snake_case = True _snake_case = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowercase ) def A ( self : Tuple ): '''simple docstring''' _snake_case = copy.deepcopy(self.__dict__ ) _snake_case = self.encoder.to_dict() _snake_case = self.decoder.to_dict() _snake_case = self.__class__.model_type return output
130
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' def __init__( self : int , lowercase : Union[str, Any] , lowercase : str=7 , lowercase : Union[str, Any]=3 , lowercase : Tuple=30 , lowercase : Optional[Any]=400 , lowercase : List[Any]=True , lowercase : Any=None , lowercase : str=True , lowercase : Tuple=[0.5, 0.5, 0.5] , lowercase : List[Any]=[0.5, 0.5, 0.5] , lowercase : Union[str, Any]=True , lowercase : List[Any]=1 / 255 , lowercase : int=True , ): '''simple docstring''' _snake_case = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333} _snake_case = parent _snake_case = batch_size _snake_case = num_channels _snake_case = min_resolution _snake_case = max_resolution _snake_case = do_resize _snake_case = size _snake_case = do_normalize _snake_case = image_mean _snake_case = image_std _snake_case = do_rescale _snake_case = rescale_factor _snake_case = do_pad def A ( self : str ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def A ( self : Optional[int] , lowercase : List[Any] , lowercase : Tuple=False ): '''simple docstring''' if not batched: _snake_case = image_inputs[0] if isinstance(lowercase , Image.Image ): _snake_case , _snake_case = image.size else: _snake_case , _snake_case = image.shape[1], image.shape[2] if w < h: _snake_case = int(self.size['shortest_edge'] * h / w ) _snake_case = self.size['shortest_edge'] elif w > h: _snake_case = self.size['shortest_edge'] _snake_case = int(self.size['shortest_edge'] * w / h ) else: _snake_case = self.size['shortest_edge'] _snake_case = self.size['shortest_edge'] else: _snake_case = [] for image in image_inputs: _snake_case , _snake_case = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _snake_case = max(lowercase , key=lambda lowercase : item[0] )[0] _snake_case = max(lowercase , key=lambda lowercase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ): '''simple docstring''' _UpperCAmelCase : Dict = DeformableDetrImageProcessor if is_vision_available() else None def A ( self : List[Any] ): '''simple docstring''' _snake_case = DeformableDetrImageProcessingTester(self ) @property def A ( self : int ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def A ( self : Dict ): '''simple docstring''' _snake_case = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase , 'image_mean' ) ) self.assertTrue(hasattr(lowercase , 'image_std' ) ) self.assertTrue(hasattr(lowercase , 'do_normalize' ) ) self.assertTrue(hasattr(lowercase , 'do_resize' ) ) self.assertTrue(hasattr(lowercase , 'do_rescale' ) ) self.assertTrue(hasattr(lowercase , 'do_pad' ) ) self.assertTrue(hasattr(lowercase , 'size' ) ) def A ( self : Union[str, Any] ): '''simple docstring''' _snake_case = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1_333} ) self.assertEqual(image_processor.do_pad , lowercase ) _snake_case = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase ) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad , lowercase ) def A ( self : Dict ): '''simple docstring''' pass def A ( self : List[str] ): '''simple docstring''' _snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , Image.Image ) # Test not batched input _snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase ) _snake_case = image_processing(lowercase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def A ( self : List[str] ): '''simple docstring''' _snake_case = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , np.ndarray ) # Test not batched input _snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _snake_case = image_processing(lowercase , return_tensors='pt' ).pixel_values _snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def A ( self : Optional[Any] ): '''simple docstring''' _snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , torch.Tensor ) # Test not batched input _snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _snake_case = image_processing(lowercase , return_tensors='pt' ).pixel_values _snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def A ( self : List[str] ): '''simple docstring''' _snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: _snake_case = json.loads(f.read() ) _snake_case = {'image_id': 39_769, 'annotations': target} # encode them _snake_case = DeformableDetrImageProcessor() _snake_case = image_processing(images=lowercase , annotations=lowercase , return_tensors='pt' ) # verify pixel values _snake_case = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['pixel_values'].shape , lowercase ) _snake_case = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase , atol=1E-4 ) ) # verify area _snake_case = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase ) ) # verify boxes _snake_case = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase ) _snake_case = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase , atol=1E-3 ) ) # verify image_id _snake_case = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase ) ) # verify is_crowd _snake_case = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase ) ) # verify class_labels _snake_case = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase ) ) # verify orig_size _snake_case = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase ) ) # verify size _snake_case = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase ) ) @slow def A ( self : Optional[Any] ): '''simple docstring''' _snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: _snake_case = json.loads(f.read() ) _snake_case = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target} _snake_case = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them _snake_case = DeformableDetrImageProcessor(format='coco_panoptic' ) _snake_case = image_processing(images=lowercase , annotations=lowercase , masks_path=lowercase , return_tensors='pt' ) # verify pixel values _snake_case = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['pixel_values'].shape , lowercase ) _snake_case = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase , atol=1E-4 ) ) # verify area _snake_case = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase ) ) # verify boxes _snake_case = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase ) _snake_case = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase , atol=1E-3 ) ) # verify image_id _snake_case = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase ) ) # verify is_crowd _snake_case = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase ) ) # verify class_labels _snake_case = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase ) ) # verify masks _snake_case = 822_873 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , lowercase ) # verify orig_size _snake_case = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase ) ) # verify size _snake_case = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase ) )
130
1
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase_ ) class __lowercase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : int , **lowerCAmelCase__ : Tuple): super().__init__(**lowerCAmelCase__) if self.framework == "tf": raise ValueError(F"The {self.__class__} is only available in PyTorch.") requires_backends(self , "vision") self.check_model_type(lowerCAmelCase__) def __call__( self : Union[str, Any] , lowerCAmelCase__ : Union[str, "Image.Image", List[Dict[str, Any]]] , lowerCAmelCase__ : Union[str, List[str]] = None , **lowerCAmelCase__ : Optional[int] , ): if "text_queries" in kwargs: SCREAMING_SNAKE_CASE_: Tuple = kwargs.pop("text_queries") if isinstance(lowerCAmelCase__ , (str, Image.Image)): SCREAMING_SNAKE_CASE_: str = {"image": image, "candidate_labels": candidate_labels} else: SCREAMING_SNAKE_CASE_: Tuple = image SCREAMING_SNAKE_CASE_: List[Any] = super().__call__(lowerCAmelCase__ , **lowerCAmelCase__) return results def _SCREAMING_SNAKE_CASE ( self : Dict , **lowerCAmelCase__ : Dict): SCREAMING_SNAKE_CASE_: Optional[Any] = {} if "threshold" in kwargs: SCREAMING_SNAKE_CASE_: str = kwargs["threshold"] if "top_k" in kwargs: SCREAMING_SNAKE_CASE_: Optional[Any] = kwargs["top_k"] return {}, {}, postprocess_params def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Optional[int]): SCREAMING_SNAKE_CASE_: Union[str, Any] = load_image(inputs["image"]) SCREAMING_SNAKE_CASE_: Tuple = inputs["candidate_labels"] if isinstance(lowerCAmelCase__ , lowerCAmelCase__): SCREAMING_SNAKE_CASE_: int = candidate_labels.split(",") SCREAMING_SNAKE_CASE_: str = torch.tensor([[image.height, image.width]] , dtype=torch.intaa) for i, candidate_label in enumerate(lowerCAmelCase__): SCREAMING_SNAKE_CASE_: List[Any] = self.tokenizer(lowerCAmelCase__ , return_tensors=self.framework) SCREAMING_SNAKE_CASE_: Union[str, Any] = self.image_processor(lowerCAmelCase__ , return_tensors=self.framework) yield { "is_last": i == len(lowerCAmelCase__) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Dict): SCREAMING_SNAKE_CASE_: int = model_inputs.pop("target_size") SCREAMING_SNAKE_CASE_: List[str] = model_inputs.pop("candidate_label") SCREAMING_SNAKE_CASE_: Optional[int] = model_inputs.pop("is_last") SCREAMING_SNAKE_CASE_: List[str] = self.model(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs} return model_outputs def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : Dict=None): SCREAMING_SNAKE_CASE_: Optional[Any] = [] for model_output in model_outputs: SCREAMING_SNAKE_CASE_: Optional[int] = model_output["candidate_label"] SCREAMING_SNAKE_CASE_: Any = BaseModelOutput(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[int] = self.image_processor.post_process_object_detection( outputs=lowerCAmelCase__ , threshold=lowerCAmelCase__ , target_sizes=model_output["target_size"])[0] for index in outputs["scores"].nonzero(): SCREAMING_SNAKE_CASE_: int = outputs["scores"][index].item() SCREAMING_SNAKE_CASE_: Optional[Any] = self._get_bounding_box(outputs["boxes"][index][0]) SCREAMING_SNAKE_CASE_: Tuple = {"score": score, "label": label, "box": box} results.append(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[int] = sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__: x["score"] , reverse=lowerCAmelCase__) if top_k: SCREAMING_SNAKE_CASE_: int = results[:top_k] return results def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : "torch.Tensor"): if self.framework != "pt": raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.") SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = box.int().tolist() SCREAMING_SNAKE_CASE_: Dict = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
13
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters __a = logging.get_logger(__name__) def a ( snake_case__: Optional[int] , snake_case__: Dict , snake_case__: int , snake_case__: List[str]=None , snake_case__: List[Any]=None ): '''simple docstring''' # Recurse if needed if "." in tensor_name: lowercase_ = tensor_name.split('''.''' ) for split in splits[:-1]: lowercase_ = getattr(snake_case__ , snake_case__ ) if new_module is None: raise ValueError(F'''{module} has no attribute {split}.''' ) lowercase_ = new_module lowercase_ = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' ) lowercase_ = tensor_name in module._buffers lowercase_ = getattr(snake_case__ , snake_case__ ) if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None: raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' ) lowercase_ = False lowercase_ = False if is_buffer or not is_bitsandbytes_available(): lowercase_ = False lowercase_ = False else: lowercase_ = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) lowercase_ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: lowercase_ = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: lowercase_ = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): lowercase_ = value.to('''cpu''' ) if value.dtype == torch.inta: lowercase_ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse( '''0.37.2''' ) if not is_abit_serializable: raise ValueError( '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ''' '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' ) else: lowercase_ = torch.tensor(snake_case__ , device='''cpu''' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None: lowercase_ = new_value.T lowercase_ = old_value.__dict__ if is_abit: lowercase_ = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) elif is_abit: lowercase_ = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) lowercase_ = new_value if fpaa_statistics is not None: setattr(module.weight , '''SCB''' , fpaa_statistics.to(snake_case__ ) ) else: if value is None: lowercase_ = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): lowercase_ = value.to(snake_case__ ) else: lowercase_ = torch.tensor(snake_case__ , device=snake_case__ ) if is_buffer: lowercase_ = new_value else: lowercase_ = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad ) lowercase_ = new_value def a ( snake_case__: str , snake_case__: Union[str, Any]=None , snake_case__: Any=None , snake_case__: List[str]=None , snake_case__: Optional[Any]=False ): '''simple docstring''' for name, module in model.named_children(): if current_key_name is None: lowercase_ = [] current_key_name.append(snake_case__ ) if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '''.'''.join(snake_case__ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(snake_case__ , snake_case__ ): lowercase_ , lowercase_ = module.weight.shape else: lowercase_ = module.in_features lowercase_ = module.out_features if quantization_config.quantization_method() == "llm_int8": lowercase_ = bnb.nn.LinearabitLt( snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) lowercase_ = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: lowercase_ = bnb.nn.Linearabit( snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) lowercase_ = True # Store the module class in case we need to transpose the weight later lowercase_ = type(snake_case__ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(snake_case__ ) if len(list(module.children() ) ) > 0: lowercase_ , lowercase_ = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def a ( snake_case__: Any , snake_case__: Any=None , snake_case__: Union[str, Any]=None , snake_case__: str=None ): '''simple docstring''' lowercase_ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert lowercase_ , lowercase_ = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def a ( *snake_case__: str , **snake_case__: Dict ): '''simple docstring''' warnings.warn( '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , snake_case__ , ) return replace_with_bnb_linear(*snake_case__ , **snake_case__ ) def a ( *snake_case__: Any , **snake_case__: List[Any] ): '''simple docstring''' warnings.warn( '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , snake_case__ , ) return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ ) def a ( snake_case__: Optional[Any] ): '''simple docstring''' lowercase_ = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() lowercase_ = find_tied_parameters(snake_case__ ) # For compatibility with Accelerate < 0.18 if isinstance(snake_case__ , snake_case__ ): lowercase_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: lowercase_ = sum(snake_case__ , [] ) lowercase_ = len(snake_case__ ) > 0 # Check if it is a base model lowercase_ = not hasattr(snake_case__ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head lowercase_ = list(model.named_children() ) lowercase_ = [list_modules[-1][0]] # add last module together with tied weights lowercase_ = set(snake_case__ ) - set(snake_case__ ) lowercase_ = list(set(snake_case__ ) ) + list(snake_case__ ) # remove ".weight" from the keys lowercase_ = ['''.weight''', '''.bias'''] lowercase_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: lowercase_ = name.replace(snake_case__ , '''''' ) filtered_module_names.append(snake_case__ ) return filtered_module_names
30
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCamelCase : Tuple ={ '''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''], '''tokenization_canine''': ['''CanineTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : int =[ '''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CanineForMultipleChoice''', '''CanineForQuestionAnswering''', '''CanineForSequenceClassification''', '''CanineForTokenClassification''', '''CanineLayer''', '''CanineModel''', '''CaninePreTrainedModel''', '''load_tf_weights_in_canine''', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys lowerCamelCase : List[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
196
from ..utils import DummyObject, requires_backends class __a ( metaclass=A__ ): _lowerCAmelCase : str = ['''torch'''] def __init__( self : int , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : List[Any] = ['''torch'''] def __init__( self : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Union[str, Any] = ['''torch'''] def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Tuple = ['''torch'''] def __init__( self : str , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : List[str] = ['''torch'''] def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Dict = ['''torch'''] def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Dict = ['''torch'''] def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Any = ['''torch'''] def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : List[Any] = ['''torch'''] def __init__( self : int , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Dict = ['''torch'''] def __init__( self : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Optional[int] = ['''torch'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' requires_backends(cls , ["torch"] ) def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> int: requires_backends(__lowerCAmelCase , ["torch"] ) def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Optional[Any]: requires_backends(__lowerCAmelCase , ["torch"] ) def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Optional[Any]: requires_backends(__lowerCAmelCase , ["torch"] ) def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]: requires_backends(__lowerCAmelCase , ["torch"] ) def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Any: requires_backends(__lowerCAmelCase , ["torch"] ) def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Dict: requires_backends(__lowerCAmelCase , ["torch"] ) def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[Any]: requires_backends(__lowerCAmelCase , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : List[str] = ['''torch'''] def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Optional[int] = ['''torch'''] def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : str = ['''torch'''] def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Any = ['''torch'''] def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Tuple = ['''torch'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : str = ['''torch'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Any = ['''torch'''] def __init__( self : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Tuple = ['''torch'''] def __init__( self : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : int = ['''torch'''] def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Optional[int] = ['''torch'''] def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : List[str] = ['''torch'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Any = ['''torch'''] def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Any = ['''torch'''] def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Tuple = ['''torch'''] def __init__( self : str , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Union[str, Any] = ['''torch'''] def __init__( self : str , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Any = ['''torch'''] def __init__( self : int , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Union[str, Any] = ['''torch'''] def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Optional[int] = ['''torch'''] def __init__( self : int , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : List[Any] = ['''torch'''] def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Optional[int] = ['''torch'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : str = ['''torch'''] def __init__( self : List[str] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Tuple = ['''torch'''] def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Dict = ['''torch'''] def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Dict = ['''torch'''] def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : str = ['''torch'''] def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : List[Any] = ['''torch'''] def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Union[str, Any] = ['''torch'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : str = ['''torch'''] def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Tuple = ['''torch'''] def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Dict = ['''torch'''] def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Union[str, Any] = ['''torch'''] def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Tuple = ['''torch'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Dict = ['''torch'''] def __init__( self : str , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : List[Any] = ['''torch'''] def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Optional[int] = ['''torch'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : List[str] = ['''torch'''] def __init__( self : int , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Tuple = ['''torch'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Tuple = ['''torch'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : int ): '''simple docstring''' requires_backends(cls , ["torch"] ) class __a ( metaclass=A__ ): _lowerCAmelCase : Dict = ['''torch'''] def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : str ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["torch"] )
196
1
'''simple docstring''' from math import factorial def _A ( A__ = 100 ): """simple docstring""" return sum(int(A__ ) for x in str(factorial(A__ ) ) ) if __name__ == "__main__": print(solution(int(input('''Enter the Number: ''').strip())))
104
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase : List[str] = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Optional[Any] = [ 'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTMSNModel', 'ViTMSNForImageClassification', 'ViTMSNPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
232
0
"""simple docstring""" from __future__ import annotations def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): if (voltage, current, resistance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if resistance < 0: raise ValueError("Resistance cannot be negative" ) if voltage == 0: return {"voltage": float(current * resistance )} elif current == 0: return {"current": voltage / resistance} elif resistance == 0: return {"resistance": voltage / current} else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
23
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy _a = logging.get_logger(__name__) class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = feature_size UpperCAmelCase_ : Any = sampling_rate UpperCAmelCase_ : Any = padding_value UpperCAmelCase_ : str = kwargs.pop("padding_side" , "right" ) UpperCAmelCase_ : List[str] = kwargs.pop("return_attention_mask" , lowercase_ ) super().__init__(**lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , ): """simple docstring""" # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(lowercase_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): UpperCAmelCase_ : Dict = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" F""" to this method that includes {self.model_input_names[0]}, but you provided""" F""" {list(processed_features.keys() )}""" ) UpperCAmelCase_ : Tuple = processed_features[self.model_input_names[0]] UpperCAmelCase_ : List[str] = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(lowercase_ ) == 0: if return_attention_mask: UpperCAmelCase_ : Union[str, Any] = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch UpperCAmelCase_ : List[str] = required_input[0] if isinstance(lowercase_ , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. UpperCAmelCase_ : Any = 0 while len(required_input[index] ) == 0: index += 1 if index < len(lowercase_ ): UpperCAmelCase_ : Optional[Any] = required_input[index][0] if return_tensors is None: if is_tf_tensor(lowercase_ ): UpperCAmelCase_ : Dict = "tf" elif is_torch_tensor(lowercase_ ): UpperCAmelCase_ : Any = "pt" elif isinstance(lowercase_ , (int, float, list, tuple, np.ndarray) ): UpperCAmelCase_ : str = "np" else: raise ValueError( F"""type of {first_element} unknown: {type(lowercase_ )}. """ "Should be one of a python, numpy, pytorch or tensorflow object." ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): UpperCAmelCase_ : Optional[int] = to_numpy(lowercase_ ) else: UpperCAmelCase_ : List[str] = [to_numpy(lowercase_ ) for v in value] # Convert padding_strategy in PaddingStrategy UpperCAmelCase_ : Dict = self._get_padding_strategies(padding=lowercase_ , max_length=lowercase_ ) UpperCAmelCase_ : str = processed_features[self.model_input_names[0]] UpperCAmelCase_ : int = len(lowercase_ ) if not all(len(lowercase_ ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." ) UpperCAmelCase_ : int = [] for i in range(lowercase_ ): UpperCAmelCase_ : str = {k: v[i] for k, v in processed_features.items()} # truncation UpperCAmelCase_ : List[str] = self._truncate( lowercase_ , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , truncation=lowercase_ , ) truncated_inputs.append(lowercase_ ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length UpperCAmelCase_ : str = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) UpperCAmelCase_ : Dict = PaddingStrategy.MAX_LENGTH UpperCAmelCase_ : List[str] = {} for i in range(lowercase_ ): # padding UpperCAmelCase_ : int = self._pad( truncated_inputs[i] , max_length=lowercase_ , padding_strategy=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , ) for key, value in outputs.items(): if key not in batch_outputs: UpperCAmelCase_ : Any = [] if value.dtype is np.dtype(np.floataa ): UpperCAmelCase_ : List[Any] = value.astype(np.floataa ) batch_outputs[key].append(lowercase_ ) return BatchFeature(lowercase_ , tensor_type=lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = PaddingStrategy.DO_NOT_PAD , lowercase_ = None , lowercase_ = None , ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: UpperCAmelCase_ : Tuple = len(lowercase_ ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): UpperCAmelCase_ : Tuple = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of UpperCAmelCase_ : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase_ ) < max_length if return_attention_mask and "attention_mask" not in processed_features: UpperCAmelCase_ : Optional[int] = np.ones(len(lowercase_ ) , dtype=np.intaa ) if needs_to_be_padded: UpperCAmelCase_ : Dict = max_length - len(lowercase_ ) if self.padding_side == "right": if return_attention_mask: UpperCAmelCase_ : List[Any] = np.pad( processed_features["attention_mask"] , (0, difference) ) UpperCAmelCase_ : Dict = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) UpperCAmelCase_ : Optional[Any] = np.pad( lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: UpperCAmelCase_ : Optional[Any] = np.pad( processed_features["attention_mask"] , (difference, 0) ) UpperCAmelCase_ : Dict = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) UpperCAmelCase_ : str = np.pad( lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return processed_features def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , ): """simple docstring""" if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." ) UpperCAmelCase_ : Optional[int] = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): UpperCAmelCase_ : Union[str, Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of UpperCAmelCase_ : Optional[Any] = len(lowercase_ ) > max_length if needs_to_be_truncated: UpperCAmelCase_ : int = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: UpperCAmelCase_ : Dict = processed_features["attention_mask"][:max_length] return processed_features def UpperCamelCase__ ( self , lowercase_=False , lowercase_=None ): """simple docstring""" # Get padding strategy if padding is not False: if padding is True: UpperCAmelCase_ : Dict = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : Optional[Any] = PaddingStrategy(lowercase_ ) elif isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : int = padding else: UpperCAmelCase_ : str = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
23
1
'''simple docstring''' from heapq import heappop, heappush import numpy as np def UpperCamelCase_( snake_case : np.ndarray , snake_case : tuple[int, int] , snake_case : tuple[int, int] , snake_case : bool , ): '''simple docstring''' snake_case_ , snake_case_ = grid.shape snake_case_ = [-1, 1, 0, 0] snake_case_ = [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] snake_case_ , snake_case_ = [(0, source)], set() snake_case_ = np.full((rows, cols) , np.inf ) snake_case_ = 0 snake_case_ = np.empty((rows, cols) , dtype=snake_case ) snake_case_ = None while queue: ((snake_case_) , (snake_case_)) = heappop(snake_case ) if (x, y) in visited: continue visited.add((x, y) ) if (x, y) == destination: snake_case_ = [] while (x, y) != source: path.append((x, y) ) snake_case_ , snake_case_ = predecessors[x, y] path.append(snake_case ) # add the source manually path.reverse() return matrix[destination], path for i in range(len(snake_case ) ): snake_case_ , snake_case_ = x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: snake_case_ = grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(snake_case , (dist + 1, (nx, ny)) ) snake_case_ = dist + 1 snake_case_ = (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
85
from __future__ import annotations def UpperCAmelCase__ ( _A : float , _A : float , _A : float , ): '''simple docstring''' if (stress, tangential_force, area).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif stress < 0: raise ValueError('''Stress cannot be negative''' ) elif tangential_force < 0: raise ValueError('''Tangential Force cannot be negative''' ) elif area < 0: raise ValueError('''Area cannot be negative''' ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
188
0
# # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def __A ( *_lowercase ): '''simple docstring''' with open(_lowercase , '''r''' ) as fh: fcntl.flock(_lowercase , fcntl.LOCK_EX ) try: print(*_lowercase ) finally: fcntl.flock(_lowercase , fcntl.LOCK_UN ) __A = int(os.environ['LOCAL_RANK']) torch.cuda.set_device(local_rank) __A = torch.device('cuda', local_rank) __A = socket.gethostname() __A = f'[{hostname}-{local_rank}]' try: # test distributed dist.init_process_group('nccl') dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __A = dist.get_rank() __A = dist.get_world_size() printflock(f'{gpu} is OK (global rank: {rank}/{world_size})') dist.barrier() if rank == 0: printflock(f'pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}') except Exception: printflock(f'{gpu} is broken') raise
75
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @property def __A ( self: Dict ) -> Union[str, Any]: torch.manual_seed(0 ) _A = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def __A ( self: Any ) -> Union[str, Any]: _A = self.dummy_uncond_unet _A = ScoreSdeVeScheduler() _A = ScoreSdeVePipeline(unet=__A , scheduler=__A ) sde_ve.to(__A ) sde_ve.set_progress_bar_config(disable=__A ) _A = torch.manual_seed(0 ) _A = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=__A ).images _A = torch.manual_seed(0 ) _A = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=__A , return_dict=__A )[ 0 ] _A = image[0, -3:, -3:, -1] _A = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _A = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: Dict ) -> Any: _A = '''google/ncsnpp-church-256''' _A = UNetaDModel.from_pretrained(__A ) _A = ScoreSdeVeScheduler.from_pretrained(__A ) _A = ScoreSdeVePipeline(unet=__A , scheduler=__A ) sde_ve.to(__A ) sde_ve.set_progress_bar_config(disable=__A ) _A = torch.manual_seed(0 ) _A = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=__A ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) _A = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
75
1
from collections.abc import Sequence def __lowerCamelCase ( lowerCamelCase__ = None ): """simple docstring""" if nums is None or not nums: raise ValueError("Input sequence should not be empty" ) lowercase__ : List[Any] = nums[0] for i in range(1 , len(lowerCamelCase__ ) ): lowercase__ : Optional[Any] = nums[i] lowercase__ : Union[str, Any] = max(lowerCamelCase__ , ans + num , lowerCamelCase__ ) return ans if __name__ == "__main__": import doctest doctest.testmod() # Try on a sample input from the user lowerCAmelCase__ = int(input('''Enter number of elements : ''').strip()) lowerCAmelCase__ = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n] print(max_subsequence_sum(array))
130
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase__ = { '''vocab_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''', '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-german-cased''': ( '''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json''' ), '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase__ = { '''distilbert-base-uncased''': 5_1_2, '''distilbert-base-uncased-distilled-squad''': 5_1_2, '''distilbert-base-cased''': 5_1_2, '''distilbert-base-cased-distilled-squad''': 5_1_2, '''distilbert-base-german-cased''': 5_1_2, '''distilbert-base-multilingual-cased''': 5_1_2, } lowerCAmelCase__ = { '''distilbert-base-uncased''': {'''do_lower_case''': True}, '''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True}, '''distilbert-base-cased''': {'''do_lower_case''': False}, '''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False}, '''distilbert-base-german-cased''': {'''do_lower_case''': False}, '''distilbert-base-multilingual-cased''': {'''do_lower_case''': False}, } class snake_case__(_UpperCamelCase ): """simple docstring""" lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = PRETRAINED_INIT_CONFIGURATION lowercase_ = ["""input_ids""", """attention_mask"""] lowercase_ = DistilBertTokenizer def __init__( self : Tuple , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : Union[str, Any]="[UNK]" , SCREAMING_SNAKE_CASE : str="[SEP]" , SCREAMING_SNAKE_CASE : Dict="[PAD]" , SCREAMING_SNAKE_CASE : List[str]="[CLS]" , SCREAMING_SNAKE_CASE : List[str]="[MASK]" , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : int=None , **SCREAMING_SNAKE_CASE : Dict , ): super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , tokenize_chinese_chars=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) lowercase__ : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , SCREAMING_SNAKE_CASE ) != do_lower_case or normalizer_state.get("strip_accents" , SCREAMING_SNAKE_CASE ) != strip_accents or normalizer_state.get("handle_chinese_chars" , SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars ): lowercase__ : Optional[Any] = getattr(SCREAMING_SNAKE_CASE , normalizer_state.pop("type" ) ) lowercase__ : Any = do_lower_case lowercase__ : Optional[int] = strip_accents lowercase__ : List[Any] = tokenize_chinese_chars lowercase__ : List[Any] = normalizer_class(**SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = do_lower_case def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict=None ): lowercase__ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ): lowercase__ : Dict = [self.sep_token_id] lowercase__ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case ( self : Any , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ): lowercase__ : Union[str, Any] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE , name=SCREAMING_SNAKE_CASE ) return tuple(SCREAMING_SNAKE_CASE )
130
1
'''simple docstring''' import warnings warnings.warn( "memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: " "`from accelerate import find_executable_batch_size` to avoid this warning.", FutureWarning, )
123
'''simple docstring''' import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets __lowerCAmelCase : Optional[Any] ="\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n" __lowerCAmelCase : Union[str, Any] ="\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n" __lowerCAmelCase : str ="\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n" def UpperCamelCase ( _lowerCamelCase : List[Any] ): def remove_articles(_lowerCamelCase : Dict ): A__ = re.compile(r"\b(a|an|the)\b" , re.UNICODE ) return re.sub(_lowerCamelCase , " " , _lowerCamelCase ) def white_space_fix(_lowerCamelCase : Tuple ): return " ".join(text.split() ) def remove_punc(_lowerCamelCase : int ): A__ = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_lowerCamelCase : Optional[int] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_lowerCamelCase ) ) ) ) def UpperCamelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] ): return int(normalize_answer(_lowerCamelCase ) == normalize_answer(_lowerCamelCase ) ) def UpperCamelCase ( _lowerCamelCase : int , _lowerCamelCase : List[Any] ): A__ = [any(compute_exact(_lowerCamelCase , _lowerCamelCase ) for ref in refs ) for pred, refs in zip(_lowerCamelCase , _lowerCamelCase )] return (sum(_lowerCamelCase ) / len(_lowerCamelCase )) * 1_00 def UpperCamelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : str ): A__ = [rgram for rgrams in rgramslist for rgram in rgrams] A__ = Counter(_lowerCamelCase ) A__ = Counter(_lowerCamelCase ) A__ = Counter() for sgram, scount in sgramcounter.items(): A__ = scount * numref A__ = Counter(_lowerCamelCase ) A__ = Counter() for cgram, ccount in cgramcounter.items(): A__ = ccount * numref # KEEP A__ = sgramcounter_rep & cgramcounter_rep A__ = keepgramcounter_rep & rgramcounter A__ = sgramcounter_rep & rgramcounter A__ = 0 A__ = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. A__ = 1 A__ = 1 if len(_lowerCamelCase ) > 0: A__ = keeptmpscorea / len(_lowerCamelCase ) if len(_lowerCamelCase ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) A__ = keeptmpscorea / sum(keepgramcounterall_rep.values() ) A__ = 0 if keepscore_precision > 0 or keepscore_recall > 0: A__ = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION A__ = sgramcounter_rep - cgramcounter_rep A__ = delgramcounter_rep - rgramcounter A__ = sgramcounter_rep - rgramcounter A__ = 0 A__ = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. A__ = 1 if len(_lowerCamelCase ) > 0: A__ = deltmpscorea / len(_lowerCamelCase ) # ADDITION A__ = set(_lowerCamelCase ) - set(_lowerCamelCase ) A__ = set(_lowerCamelCase ) & set(_lowerCamelCase ) A__ = set(_lowerCamelCase ) - set(_lowerCamelCase ) A__ = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. A__ = 1 A__ = 1 if len(_lowerCamelCase ) > 0: A__ = addtmpscore / len(_lowerCamelCase ) if len(_lowerCamelCase ) > 0: A__ = addtmpscore / len(_lowerCamelCase ) A__ = 0 if addscore_precision > 0 or addscore_recall > 0: A__ = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def UpperCamelCase ( _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] ): A__ = len(_lowerCamelCase ) A__ = ssent.split(" " ) A__ = csent.split(" " ) A__ = [] A__ = [] A__ = [] A__ = [] A__ = [] A__ = [] A__ = [] A__ = [] A__ = [] A__ = [] for rsent in rsents: A__ = rsent.split(" " ) A__ = [] A__ = [] A__ = [] ragramslist.append(_lowerCamelCase ) for i in range(0 , len(_lowerCamelCase ) - 1 ): if i < len(_lowerCamelCase ) - 1: A__ = ragrams[i] + " " + ragrams[i + 1] ragrams.append(_lowerCamelCase ) if i < len(_lowerCamelCase ) - 2: A__ = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] ragrams.append(_lowerCamelCase ) if i < len(_lowerCamelCase ) - 3: A__ = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3] ragrams.append(_lowerCamelCase ) ragramslist.append(_lowerCamelCase ) ragramslist.append(_lowerCamelCase ) ragramslist.append(_lowerCamelCase ) for i in range(0 , len(_lowerCamelCase ) - 1 ): if i < len(_lowerCamelCase ) - 1: A__ = sagrams[i] + " " + sagrams[i + 1] sagrams.append(_lowerCamelCase ) if i < len(_lowerCamelCase ) - 2: A__ = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] sagrams.append(_lowerCamelCase ) if i < len(_lowerCamelCase ) - 3: A__ = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3] sagrams.append(_lowerCamelCase ) for i in range(0 , len(_lowerCamelCase ) - 1 ): if i < len(_lowerCamelCase ) - 1: A__ = cagrams[i] + " " + cagrams[i + 1] cagrams.append(_lowerCamelCase ) if i < len(_lowerCamelCase ) - 2: A__ = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] cagrams.append(_lowerCamelCase ) if i < len(_lowerCamelCase ) - 3: A__ = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3] cagrams.append(_lowerCamelCase ) ((A__), (A__), (A__)) = SARIngram(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ((A__), (A__), (A__)) = SARIngram(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ((A__), (A__), (A__)) = SARIngram(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ((A__), (A__), (A__)) = SARIngram(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) A__ = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 A__ = sum([delascore, delascore, delascore, delascore] ) / 4 A__ = sum([addascore, addascore, addascore, addascore] ) / 4 A__ = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def UpperCamelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : bool = True , _lowerCamelCase : str = "13a" , _lowerCamelCase : bool = True ): # Normalization is requried for the ASSET dataset (one of the primary # datasets in sentence simplification) to allow using space # to split the sentence. Even though Wiki-Auto and TURK datasets, # do not require normalization, we do it for consistency. # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset. # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7 if lowercase: A__ = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: A__ = sacrebleu.metrics.bleu._get_tokenizer(_lowerCamelCase )()(_lowerCamelCase ) else: A__ = sacrebleu.TOKENIZERS[tokenizer]()(_lowerCamelCase ) elif tokenizer == "moses": A__ = sacremoses.MosesTokenizer().tokenize(_lowerCamelCase , return_str=_lowerCamelCase , escape=_lowerCamelCase ) elif tokenizer == "penn": A__ = sacremoses.MosesTokenizer().penn_tokenize(_lowerCamelCase , return_str=_lowerCamelCase ) else: A__ = sentence if not return_str: A__ = normalized_sent.split() return normalized_sent def UpperCamelCase ( _lowerCamelCase : Dict , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] ): if not (len(_lowerCamelCase ) == len(_lowerCamelCase ) == len(_lowerCamelCase )): raise ValueError("Sources length must match predictions and references lengths." ) A__ = 0 for src, pred, refs in zip(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): sari_score += SARIsent(normalize(_lowerCamelCase ) , normalize(_lowerCamelCase ) , [normalize(_lowerCamelCase ) for sent in refs] ) A__ = sari_score / len(_lowerCamelCase ) return 1_00 * sari_score def UpperCamelCase ( _lowerCamelCase : int , _lowerCamelCase : Dict , _lowerCamelCase : List[Any]="exp" , _lowerCamelCase : int=None , _lowerCamelCase : str=False , _lowerCamelCase : List[str]=False , _lowerCamelCase : Dict=False , ): A__ = len(references[0] ) if any(len(_lowerCamelCase ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) A__ = [[refs[i] for refs in references] for i in range(_lowerCamelCase )] A__ = sacrebleu.corpus_bleu( _lowerCamelCase , _lowerCamelCase , smooth_method=_lowerCamelCase , smooth_value=_lowerCamelCase , force=_lowerCamelCase , lowercase=_lowerCamelCase , use_effective_order=_lowerCamelCase , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): def UpperCAmelCase_ ( self :Any )-> Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=[ "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py", "https://github.com/cocoxu/simplification/blob/master/SARI.py", "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py", "https://github.com/mjpost/sacreBLEU", ] , reference_urls=[ "https://www.aclweb.org/anthology/Q16-1029.pdf", "https://github.com/mjpost/sacreBLEU", "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def UpperCAmelCase_ ( self :str , lowercase_ :Dict , lowercase_ :List[Any] , lowercase_ :int )-> int: A__ = {} result.update({"sari": compute_sari(sources=lowercase_ , predictions=lowercase_ , references=lowercase_ )} ) result.update({"sacrebleu": compute_sacrebleu(predictions=lowercase_ , references=lowercase_ )} ) result.update({"exact": compute_em(predictions=lowercase_ , references=lowercase_ )} ) return result
123
1
from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name __lowerCAmelCase = ''' Examples: ```py >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline >>> from diffusers.utils import load_image >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ... ) >>> pipe_prior.to("cuda") >>> prompt = "A red cartoon frog, 4k" >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 ... ) >>> pipe.to("cuda") >>> init_image = load_image( ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" ... "/kandinsky/frog.png" ... ) >>> image = pipe( ... image=init_image, ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... strength=0.2, ... ).images >>> image[0].save("red_frog.png") ``` ''' def snake_case_ ( snake_case , snake_case , snake_case=8 ) -> Tuple: lowercase__: Tuple = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 lowercase__: Union[str, Any] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def snake_case_ ( snake_case , snake_case=5_12 , snake_case=5_12 ) -> Dict: lowercase__: Optional[Any] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) lowercase__: Optional[int] = np.array(pil_image.convert('RGB' ) ) lowercase__: Optional[Any] = arr.astype(np.floataa ) / 1_2_7.5 - 1 lowercase__: str = np.transpose(snake_case , [2, 0, 1] ) lowercase__: Dict = torch.from_numpy(snake_case ).unsqueeze(0 ) return image class __a ( __UpperCamelCase ): def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> Any: '''simple docstring''' super().__init__() self.register_modules( unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , movq=lowerCAmelCase__ , ) lowercase__: Optional[int] = 2 ** (len(self.movq.config.block_out_channels ) - 1) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: '''simple docstring''' # get the original timestep using init_timestep lowercase__: Dict = min(int(num_inference_steps * strength ) , lowerCAmelCase__ ) lowercase__: Optional[int] = max(num_inference_steps - init_timestep , 0 ) lowercase__: int = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ) -> Dict: '''simple docstring''' if not isinstance(lowerCAmelCase__ , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( F'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCAmelCase__ )}' ) lowercase__: Tuple = image.to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ) lowercase__: str = batch_size * num_images_per_prompt if image.shape[1] == 4: lowercase__: int = image else: if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) != batch_size: raise ValueError( F'You have passed a list of generators of length {len(lowerCAmelCase__ )}, but requested an effective batch' F' size of {batch_size}. Make sure the batch size matches the length of the generators.' ) elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): lowercase__: str = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCAmelCase__ ) ] lowercase__: Dict = torch.cat(lowerCAmelCase__ , dim=0 ) else: lowercase__: Optional[int] = self.movq.encode(lowerCAmelCase__ ).latent_dist.sample(lowerCAmelCase__ ) lowercase__: Optional[int] = self.movq.config.scaling_factor * init_latents lowercase__: str = torch.cat([init_latents] , dim=0 ) lowercase__: List[Any] = init_latents.shape lowercase__: Optional[int] = randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ) # get latents lowercase__: str = self.scheduler.add_noise(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) lowercase__: Union[str, Any] = init_latents return latents def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=0 ) -> Optional[int]: '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) lowercase__: Optional[int] = torch.device(F'cuda:{gpu_id}' ) lowercase__: Any = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowerCAmelCase__ , lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=0 ) -> str: '''simple docstring''' if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' ) lowercase__: Optional[int] = torch.device(F'cuda:{gpu_id}' ) if self.device.type != "cpu": self.to('cpu' , silence_dtype_warnings=lowerCAmelCase__ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) lowercase__: int = None for cpu_offloaded_model in [self.unet, self.movq]: lowercase__ , lowercase__: List[str] = cpu_offload_with_hook(lowerCAmelCase__ , lowerCAmelCase__ , prev_module_hook=lowerCAmelCase__ ) # We'll offload the last model manually. lowercase__: Optional[Any] = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def SCREAMING_SNAKE_CASE__ ( self ) -> Dict: '''simple docstring''' if not hasattr(self.unet , '_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(lowerCAmelCase__ , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowerCAmelCase__ ) def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 512 , lowerCAmelCase__ = 512 , lowerCAmelCase__ = 100 , lowerCAmelCase__ = 4.0 , lowerCAmelCase__ = 0.3 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = None , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , ) -> Optional[int]: '''simple docstring''' lowercase__: int = self._execution_device lowercase__: Tuple = guidance_scale > 1.0 if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): lowercase__: Optional[int] = torch.cat(lowerCAmelCase__ , dim=0 ) lowercase__: str = image_embeds.shape[0] if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): lowercase__: List[str] = torch.cat(lowerCAmelCase__ , dim=0 ) if do_classifier_free_guidance: lowercase__: int = image_embeds.repeat_interleave(lowerCAmelCase__ , dim=0 ) lowercase__: int = negative_image_embeds.repeat_interleave(lowerCAmelCase__ , dim=0 ) lowercase__: str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCAmelCase__ ) if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): lowercase__: str = [image] if not all(isinstance(lowerCAmelCase__ , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( F'Input is in incorrect format: {[type(lowerCAmelCase__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor' ) lowercase__: Optional[Any] = torch.cat([prepare_image(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) for i in image] , dim=0 ) lowercase__: List[str] = image.to(dtype=image_embeds.dtype , device=lowerCAmelCase__ ) lowercase__: int = self.movq.encode(lowerCAmelCase__ )['latents'] lowercase__: List[str] = latents.repeat_interleave(lowerCAmelCase__ , dim=0 ) self.scheduler.set_timesteps(lowerCAmelCase__ , device=lowerCAmelCase__ ) lowercase__ , lowercase__: Dict = self.get_timesteps(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) lowercase__: List[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt ) lowercase__ , lowercase__: Optional[Any] = downscale_height_and_width(lowerCAmelCase__ , lowerCAmelCase__ , self.movq_scale_factor ) lowercase__: List[Any] = self.prepare_latents( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , image_embeds.dtype , lowerCAmelCase__ , lowerCAmelCase__ ) for i, t in enumerate(self.progress_bar(lowerCAmelCase__ ) ): # expand the latents if we are doing classifier free guidance lowercase__: List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowercase__: Optional[Any] = {'image_embeds': image_embeds} lowercase__: List[str] = self.unet( sample=lowerCAmelCase__ , timestep=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , added_cond_kwargs=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0] if do_classifier_free_guidance: lowercase__ , lowercase__: str = noise_pred.split(latents.shape[1] , dim=1 ) lowercase__ , lowercase__: Union[str, Any] = noise_pred.chunk(2 ) lowercase__ , lowercase__: Union[str, Any] = variance_pred.chunk(2 ) lowercase__: Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) lowercase__: int = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , 'variance_type' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): lowercase__ , lowercase__: Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 lowercase__: int = self.scheduler.step( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ , )[0] # post-processing lowercase__: List[str] = self.movq.decode(lowerCAmelCase__ , force_not_quantize=lowerCAmelCase__ )['sample'] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' ) if output_type in ["np", "pil"]: lowercase__: List[Any] = image * 0.5 + 0.5 lowercase__: int = image.clamp(0 , 1 ) lowercase__: Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": lowercase__: Dict = self.numpy_to_pil(lowerCAmelCase__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCAmelCase__ )
196
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowerCAmelCase = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''') @require_sentencepiece @require_tokenizers class __a ( __UpperCamelCase , unittest.TestCase ): __lowercase : int = PegasusTokenizer __lowercase : Any = PegasusTokenizerFast __lowercase : Optional[int] = True __lowercase : Tuple = True def SCREAMING_SNAKE_CASE__ ( self ) -> Any: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase__: List[str] = PegasusTokenizer(lowerCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]: '''simple docstring''' return PegasusTokenizer.from_pretrained('google/pegasus-large' ) def SCREAMING_SNAKE_CASE__ ( self , **lowerCAmelCase__ ) -> PegasusTokenizer: '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[str]: '''simple docstring''' return ("This is a test", "This is a test") def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple: '''simple docstring''' lowercase__: Optional[Any] = '</s>' lowercase__: Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]: '''simple docstring''' lowercase__: Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<pad>' ) self.assertEqual(vocab_keys[1] , '</s>' ) self.assertEqual(vocab_keys[-1] , 'v' ) self.assertEqual(len(lowerCAmelCase__ ) , 1_103 ) def SCREAMING_SNAKE_CASE__ ( self ) -> Any: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_103 ) def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]: '''simple docstring''' lowercase__: Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowercase__: Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowercase__: Optional[Any] = ( 'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important' ' </s> <pad> <pad> <pad>' ) lowercase__: Dict = rust_tokenizer([raw_input_str] , return_tensors=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ).input_ids[0] lowercase__: Tuple = py_tokenizer([raw_input_str] , return_tensors=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ).input_ids[0] self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]: '''simple docstring''' lowercase__: int = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowercase__: Any = '<mask_1> To ensure a <mask_2> flow of bank resolutions.' lowercase__: Union[str, Any] = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1] lowercase__: int = tokenizer([raw_input_str] , return_tensors=lowerCAmelCase__ ).input_ids[0] self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ) -> Dict: '''simple docstring''' lowercase__: Optional[int] = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 96_103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1_024 lowercase__: int = 'To ensure a smooth flow of bank resolutions.' lowercase__: Any = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1] lowercase__: str = tokenizer([raw_input_str] , return_tensors=lowerCAmelCase__ ).input_ids[0] self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def SCREAMING_SNAKE_CASE__ ( self ) -> Dict: '''simple docstring''' lowercase__: Any = ['This is going to be way too long.' * 150, 'short example'] lowercase__: Tuple = ['not super long but more than 5 tokens', 'tiny'] lowercase__: Dict = self._large_tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='pt' ) lowercase__: Any = self._large_tokenizer( text_target=lowerCAmelCase__ , max_length=5 , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='pt' ) assert batch.input_ids.shape == (2, 1_024) assert batch.attention_mask.shape == (2, 1_024) assert targets["input_ids"].shape == (2, 5) assert len(lowerCAmelCase__ ) == 2 # input_ids, attention_mask. @slow def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]: '''simple docstring''' # fmt: off lowercase__: List[str] = {'input_ids': [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , ) @require_sentencepiece @require_tokenizers class __a ( __UpperCamelCase , unittest.TestCase ): __lowercase : int = PegasusTokenizer __lowercase : Any = PegasusTokenizerFast __lowercase : Any = True __lowercase : Dict = True def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase__: Union[str, Any] = PegasusTokenizer(lowerCAmelCase__ , offset=0 , mask_token_sent=lowerCAmelCase__ , mask_token='[MASK]' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def SCREAMING_SNAKE_CASE__ ( self ) -> str: '''simple docstring''' return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' ) def SCREAMING_SNAKE_CASE__ ( self , **lowerCAmelCase__ ) -> PegasusTokenizer: '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Optional[int]: '''simple docstring''' return ("This is a test", "This is a test") def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]: '''simple docstring''' lowercase__: str = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowercase__: str = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowercase__: Tuple = ( 'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>' ' <pad> <pad> <pad>' ) lowercase__: List[Any] = rust_tokenizer([raw_input_str] , return_tensors=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ).input_ids[0] lowercase__: Any = py_tokenizer([raw_input_str] , return_tensors=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ).input_ids[0] self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) @require_torch def SCREAMING_SNAKE_CASE__ ( self ) -> int: '''simple docstring''' lowercase__: List[Any] = ['This is going to be way too long.' * 1_000, 'short example'] lowercase__: str = ['not super long but more than 5 tokens', 'tiny'] lowercase__: Tuple = self._large_tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='pt' ) lowercase__: Dict = self._large_tokenizer( text_target=lowerCAmelCase__ , max_length=5 , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='pt' ) assert batch.input_ids.shape == (2, 4_096) assert batch.attention_mask.shape == (2, 4_096) assert targets["input_ids"].shape == (2, 5) assert len(lowerCAmelCase__ ) == 2 # input_ids, attention_mask. def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple: '''simple docstring''' lowercase__: str = ( 'This is an example string that is used to test the original TF implementation against the HF' ' implementation' ) lowercase__: Optional[int] = self._large_tokenizer(lowerCAmelCase__ ).input_ids self.assertListEqual( lowerCAmelCase__ , [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1] , )
196
1
import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase_ ( _UpperCAmelCase ,unittest.TestCase ): '''simple docstring''' a__ = FunnelTokenizer a__ = FunnelTokenizerFast a__ = True a__ = True def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int: super().setUp() A : Optional[Any] = [ '''<unk>''', '''<cls>''', '''<sep>''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] A : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , **__lowerCamelCase : Union[str, Any] ) -> List[str]: return FunnelTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : int , **__lowerCamelCase : Dict ) -> Optional[Any]: return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : int ) -> Dict: A : Tuple = '''UNwant\u00E9d,running''' A : Optional[int] = '''unwanted, running''' return input_text, output_text def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[Any]: A : List[str] = self.tokenizer_class(self.vocab_file ) A : Tuple = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(_UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [7, 4, 5, 10, 8, 9] ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Dict: A : Union[str, Any] = self.get_tokenizers(do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: A : int = tokenizer("UNwant\u00E9d,running" ) A : Union[str, Any] = len(inputs["input_ids"] ) - 1 self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len ) A : Union[str, Any] = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running" ) self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len )
352
from collections.abc import Generator from math import sin def UpperCAmelCase ( _lowerCamelCase ): if len(_lowerCamelCase ) != 32: raise ValueError("Input must be of length 32" ) A : Any = B"" for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def UpperCAmelCase ( _lowerCamelCase ): if i < 0: raise ValueError("Input must be non-negative" ) A : List[Any] = format(_lowerCamelCase , "08x" )[-8:] A : List[str] = B"" for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" ) return little_endian_hex def UpperCAmelCase ( _lowerCamelCase ): A : Optional[Any] = B"" for char in message: bit_string += format(_lowerCamelCase , "08b" ).encode("utf-8" ) A : int = format(len(_lowerCamelCase ) , "064b" ).encode("utf-8" ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(_lowerCamelCase ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def UpperCAmelCase ( _lowerCamelCase ): if len(_lowerCamelCase ) % 512 != 0: raise ValueError("Input must have length that's a multiple of 512" ) for pos in range(0 , len(_lowerCamelCase ) , 512 ): A : Optional[int] = bit_string[pos : pos + 512] A : List[str] = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def UpperCAmelCase ( _lowerCamelCase ): if i < 0: raise ValueError("Input must be non-negative" ) A : Union[str, Any] = format(_lowerCamelCase , "032b" ) A : List[str] = "" for c in i_str: new_str += "1" if c == "0" else "0" return int(_lowerCamelCase , 2 ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): return (a + b) % 2**32 def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): if i < 0: raise ValueError("Input must be non-negative" ) if shift < 0: raise ValueError("Shift must be non-negative" ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def UpperCAmelCase ( _lowerCamelCase ): A : Union[str, Any] = preprocess(_lowerCamelCase ) A : Any = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states A : Optional[int] = 0X67452301 A : Any = 0Xefcdab89 A : Tuple = 0X98badcfe A : Union[str, Any] = 0X10325476 A : Optional[Any] = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(_lowerCamelCase ): A : Optional[Any] = aa A : Optional[Any] = ba A : List[Any] = ca A : Optional[int] = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f A : Dict = d ^ (b & (c ^ d)) A : Optional[Any] = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f A : Optional[int] = c ^ (d & (b ^ c)) A : List[Any] = (5 * i + 1) % 16 elif i <= 47: A : Tuple = b ^ c ^ d A : str = (3 * i + 5) % 16 else: A : Union[str, Any] = c ^ (b | not_aa(_lowerCamelCase )) A : Any = (7 * i) % 16 A : Union[str, Any] = (f + a + added_consts[i] + block_words[g]) % 2**32 A : Dict = d A : Optional[int] = c A : Optional[int] = b A : Any = sum_aa(_lowerCamelCase , left_rotate_aa(_lowerCamelCase , shift_amounts[i] ) ) # Add hashed chunk to running total A : Dict = sum_aa(_lowerCamelCase , _lowerCamelCase ) A : Any = sum_aa(_lowerCamelCase , _lowerCamelCase ) A : Dict = sum_aa(_lowerCamelCase , _lowerCamelCase ) A : Union[str, Any] = sum_aa(_lowerCamelCase , _lowerCamelCase ) A : Optional[Any] = reformat_hex(_lowerCamelCase ) + reformat_hex(_lowerCamelCase ) + reformat_hex(_lowerCamelCase ) + reformat_hex(_lowerCamelCase ) return digest if __name__ == "__main__": import doctest doctest.testmod()
256
0
'''simple docstring''' import torch from transformers import CamembertForMaskedLM, CamembertTokenizer def snake_case_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str=5 ) -> Any: # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py assert masked_input.count('''<mask>''' ) == 1 UpperCAmelCase : Dict = torch.tensor(tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) ).unsqueeze(0 ) # Batch size 1 UpperCAmelCase : List[str] = model(_lowerCAmelCase )[0] # The last hidden-state is the first element of the output tuple UpperCAmelCase : Tuple = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item() UpperCAmelCase : int = logits[0, masked_index, :] UpperCAmelCase : str = logits.softmax(dim=0 ) UpperCAmelCase , UpperCAmelCase : str = prob.topk(k=_lowerCAmelCase , dim=0 ) UpperCAmelCase : str = ''' '''.join( [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(_lowerCAmelCase ) )] ) UpperCAmelCase : Any = tokenizer.mask_token UpperCAmelCase : Optional[int] = [] for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ): UpperCAmelCase : int = predicted_token_bpe.replace('''\u2581''' , ''' ''' ) if " {0}".format(_lowerCAmelCase ) in masked_input: topk_filled_outputs.append( ( masked_input.replace(''' {0}'''.format(_lowerCAmelCase ) , _lowerCAmelCase ), values[index].item(), predicted_token, ) ) else: topk_filled_outputs.append( ( masked_input.replace(_lowerCAmelCase , _lowerCAmelCase ), values[index].item(), predicted_token, ) ) return topk_filled_outputs UpperCamelCase__: Optional[int] = CamembertTokenizer.from_pretrained("camembert-base") UpperCamelCase__: List[Any] = CamembertForMaskedLM.from_pretrained("camembert-base") model.eval() UpperCamelCase__: int = "Le camembert est <mask> :)" print(fill_mask(masked_input, model, tokenizer, topk=3))
23
'''simple docstring''' import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel UpperCamelCase__: Tuple = "0.12" # assumed parallelism: 8 @require_flax @is_staging_test class SCREAMING_SNAKE_CASE( unittest.TestCase ): """simple docstring""" @classmethod def A ( cls : Union[str, Any] ) -> int: UpperCAmelCase : Optional[Any] = TOKEN HfFolder.save_token(__snake_case ) @classmethod def A ( cls : List[str] ) -> Tuple: try: delete_repo(token=cls._token , repo_id='''test-model-flax''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' ) except HTTPError: pass def A ( self : int ) -> Tuple: UpperCAmelCase : List[Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) UpperCAmelCase : Dict = FlaxBertModel(__snake_case ) model.push_to_hub('''test-model-flax''' , use_auth_token=self._token ) UpperCAmelCase : Tuple = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" ) UpperCAmelCase : List[Any] = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase : Tuple = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase : Union[str, Any] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__snake_case , 1E-3 , msg=F"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''test-model-flax''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__snake_case , repo_id='''test-model-flax''' , push_to_hub=__snake_case , use_auth_token=self._token ) UpperCAmelCase : str = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" ) UpperCAmelCase : Any = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase : str = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase : Optional[Any] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__snake_case , 1E-3 , msg=F"""{key} not identical""" ) def A ( self : Optional[Any] ) -> Union[str, Any]: UpperCAmelCase : Dict = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) UpperCAmelCase : Optional[Any] = FlaxBertModel(__snake_case ) model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token ) UpperCAmelCase : Union[str, Any] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) UpperCAmelCase : List[Any] = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase : int = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase : Any = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__snake_case , 1E-3 , msg=F"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( __snake_case , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=__snake_case , use_auth_token=self._token ) UpperCAmelCase : str = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) UpperCAmelCase : Any = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase : Optional[Any] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase : int = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__snake_case , 1E-3 , msg=F"""{key} not identical""" ) def snake_case_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ) -> Union[str, Any]: UpperCAmelCase : str = True UpperCAmelCase : int = flatten_dict(modela.params ) UpperCAmelCase : Dict = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4: UpperCAmelCase : Dict = False return models_are_equal @require_flax class SCREAMING_SNAKE_CASE( unittest.TestCase ): """simple docstring""" def A ( self : Tuple ) -> Union[str, Any]: UpperCAmelCase : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) UpperCAmelCase : Dict = FlaxBertModel(__snake_case ) UpperCAmelCase : int = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__snake_case , __snake_case ) ) with self.assertRaises(__snake_case ): UpperCAmelCase : Tuple = FlaxBertModel.from_pretrained(__snake_case ) UpperCAmelCase : str = FlaxBertModel.from_pretrained(__snake_case , subfolder=__snake_case ) self.assertTrue(check_models_equal(__snake_case , __snake_case ) ) def A ( self : List[str] ) -> Dict: UpperCAmelCase : Dict = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) UpperCAmelCase : Dict = FlaxBertModel(__snake_case ) UpperCAmelCase : Optional[int] = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__snake_case , __snake_case ) , max_shard_size='''10KB''' ) with self.assertRaises(__snake_case ): UpperCAmelCase : Any = FlaxBertModel.from_pretrained(__snake_case ) UpperCAmelCase : Union[str, Any] = FlaxBertModel.from_pretrained(__snake_case , subfolder=__snake_case ) self.assertTrue(check_models_equal(__snake_case , __snake_case ) ) def A ( self : Optional[int] ) -> str: UpperCAmelCase : Dict = '''bert''' UpperCAmelCase : int = '''hf-internal-testing/tiny-random-bert-subfolder''' with self.assertRaises(__snake_case ): UpperCAmelCase : Optional[Any] = FlaxBertModel.from_pretrained(__snake_case ) UpperCAmelCase : Tuple = FlaxBertModel.from_pretrained(__snake_case , subfolder=__snake_case ) self.assertIsNotNone(__snake_case ) def A ( self : Dict ) -> List[Any]: UpperCAmelCase : Optional[int] = '''bert''' UpperCAmelCase : int = '''hf-internal-testing/tiny-random-bert-sharded-subfolder''' with self.assertRaises(__snake_case ): UpperCAmelCase : Dict = FlaxBertModel.from_pretrained(__snake_case ) UpperCAmelCase : Union[str, Any] = FlaxBertModel.from_pretrained(__snake_case , subfolder=__snake_case ) self.assertIsNotNone(__snake_case )
23
1
"""simple docstring""" import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def snake_case_() -> List[Any]: """simple docstring""" _snake_case = argparse.ArgumentParser() parser.add_argument( '''-m''' , '''--pretrained_model_name_or_path''' , type=lowercase_ , default=lowercase_ , required=lowercase_ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , ) parser.add_argument( '''-c''' , '''--caption''' , type=lowercase_ , default='''robotic cat with wings''' , help='''Text used to generate images.''' , ) parser.add_argument( '''-n''' , '''--images_num''' , type=lowercase_ , default=4 , help='''How much images to generate.''' , ) parser.add_argument( '''-s''' , '''--seed''' , type=lowercase_ , default=42 , help='''Seed for random process.''' , ) parser.add_argument( '''-ci''' , '''--cuda_id''' , type=lowercase_ , default=0 , help='''cuda_id.''' , ) _snake_case = parser.parse_args() return args def snake_case_(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str: """simple docstring""" if not len(lowercase_ ) == rows * cols: raise ValueError('''The specified number of rows and columns are not correct.''' ) _snake_case, _snake_case = imgs[0].size _snake_case = Image.new('''RGB''' , size=(cols * w, rows * h) ) _snake_case, _snake_case = grid.size for i, img in enumerate(lowercase_ ): grid.paste(lowercase_ , box=(i % cols * w, i // cols * h) ) return grid def snake_case_(_UpperCamelCase , _UpperCamelCase="robotic cat with wings" , _UpperCamelCase=7.5 , _UpperCamelCase=50 , _UpperCamelCase=1 , _UpperCamelCase=42 , ) -> Tuple: """simple docstring""" _snake_case = torch.Generator(pipeline.device ).manual_seed(lowercase_ ) _snake_case = pipeline( lowercase_ , guidance_scale=lowercase_ , num_inference_steps=lowercase_ , generator=lowercase_ , num_images_per_prompt=lowercase_ , ).images _snake_case = int(math.sqrt(lowercase_ ) ) _snake_case = image_grid(lowercase_ , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images __A = parse_args() # Load models and create wrapper for stable diffusion __A = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''') __A = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''') __A = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''') __A = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''') __A = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) __A = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')): __A = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, '''unet''', unet) else: __A = unet.to(torch.device('''cuda''', args.cuda_id)) __A = pipeline.to(unet.device) __A = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split())))) __A = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
357
import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase_ ( __lowercase , unittest.TestCase ): UpperCamelCase_ : Union[str, Any] = CLIPTokenizer UpperCamelCase_ : Optional[int] = CLIPTokenizerFast UpperCamelCase_ : Dict = True UpperCamelCase_ : Union[str, Any] = {} UpperCamelCase_ : Optional[Any] = False def UpperCamelCase_ ( self : Union[str, Any] ) -> Dict: super().setUp() # fmt: off _snake_case = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on _snake_case = dict(zip(A__ , range(len(A__ ) ) ) ) _snake_case = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>'''] _snake_case = {'''unk_token''': '''<unk>'''} _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(A__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(A__ ) ) def UpperCamelCase_ ( self : List[Any] , **A__ : int ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname , **A__ ) def UpperCamelCase_ ( self : Any , **A__ : Tuple ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A__ ) def UpperCamelCase_ ( self : Optional[Any] , A__ : str ) -> str: _snake_case = '''lower newer''' _snake_case = '''lower newer''' return input_text, output_text def UpperCamelCase_ ( self : Union[str, Any] ) -> Optional[int]: _snake_case = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _snake_case = '''lower newer''' _snake_case = ['''lo''', '''w''', '''er</w>''', '''n''', '''e''', '''w''', '''er</w>'''] _snake_case = tokenizer.tokenize(A__ ) self.assertListEqual(A__ , A__ ) _snake_case = tokens + [tokenizer.unk_token] _snake_case = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ ) @require_ftfy def UpperCamelCase_ ( self : Any ) -> Optional[int]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _snake_case = self.tokenizer_class.from_pretrained(A__ , **A__ ) _snake_case = self.rust_tokenizer_class.from_pretrained(A__ , **A__ ) _snake_case = '''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.''' _snake_case = tokenizer_s.tokenize(A__ ) _snake_case = tokenizer_r.tokenize(A__ ) self.assertListEqual(A__ , A__ ) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways _snake_case = '''xa\u0303y''' + ''' ''' + '''x\xe3y''' _snake_case = tokenizer_s.tokenize(A__ ) _snake_case = tokenizer_r.tokenize(A__ ) self.assertListEqual(A__ , A__ ) # Test that the tokenization is identical on unicode of space type _snake_case = [ '''\u0009''', # (horizontal tab, '\t') '''\u000B''', # (vertical tab) '''\u000C''', # (form feed) '''\u0020''', # (space, ' ') '''\u200E''', # (left-to-right mark):w '''\u200F''', # (right-to-left mark) ] for unicode_seq in spaces_unicodes: _snake_case = tokenizer_s.tokenize(A__ ) _snake_case = tokenizer_r.tokenize(A__ ) self.assertListEqual(A__ , A__ ) # Test that the tokenization is identical on unicode of line break type _snake_case = [ '''\u000A''', # (line feed, '\n') '''\r\n''', # (carriage return and line feed, '\r\n') '''\u000D''', # (carriage return, '\r') '''\r''', # (carriage return, '\r') '''\u000D''', # (carriage return, '\r') '''\u2028''', # (line separator) '''\u2029''', # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: _snake_case = tokenizer_s.tokenize(A__ ) _snake_case = tokenizer_r.tokenize(A__ ) self.assertListEqual(A__ , A__ ) def UpperCamelCase_ ( self : List[Any] ) -> Optional[Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _snake_case = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` _snake_case = f"""{text_of_1_token} {text_of_1_token}""" _snake_case = self.rust_tokenizer_class.from_pretrained( A__ , use_fast=A__ , ) _snake_case = tokenizer_r(A__ , return_offsets_mapping=A__ , add_special_tokens=A__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A__ ) + 1, len(A__ ) + 1 + len(A__ )) , ) _snake_case = f""" {text}""" _snake_case = self.rust_tokenizer_class.from_pretrained( A__ , use_fast=A__ , ) _snake_case = tokenizer_r(A__ , return_offsets_mapping=A__ , add_special_tokens=A__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A__ ) + 1, 1 + len(A__ ) + 1 + len(A__ )) , ) def UpperCamelCase_ ( self : Union[str, Any] ) -> Optional[Any]: # Test related to the breaking change introduced in transformers v4.17.0 # We need to check that an error in raised when the user try to load a previous version of the tokenizer. with self.assertRaises(A__ ) as context: self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''' ) self.assertTrue( context.exception.args[0].startswith( '''The `backend_tokenizer` provided does not match the expected format.''' ) ) @require_ftfy def UpperCamelCase_ ( self : Dict ) -> Union[str, Any]: super().test_tokenization_python_rust_equals() def UpperCamelCase_ ( self : str ) -> Optional[int]: # CLIP always lower cases letters pass
278
0
'''simple docstring''' import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, *lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ): """simple docstring""" super().__init__(*lowerCAmelCase, **lowerCAmelCase ) lowerCamelCase_ =eval_examples lowerCamelCase_ =post_process_function def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase = "eval" ): """simple docstring""" lowerCamelCase_ =self.eval_dataset if eval_dataset is None else eval_dataset lowerCamelCase_ =self.get_eval_dataloader(lowerCAmelCase ) lowerCamelCase_ =self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. lowerCamelCase_ =self.compute_metrics lowerCamelCase_ =None lowerCamelCase_ =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop lowerCamelCase_ =time.time() try: lowerCamelCase_ =eval_loop( lowerCAmelCase, description='''Evaluation''', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=lowerCAmelCase, metric_key_prefix=lowerCAmelCase, ) finally: lowerCamelCase_ =compute_metrics lowerCamelCase_ =self.args.eval_batch_size * self.args.world_size if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( lowerCAmelCase, lowerCAmelCase, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size ), ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default lowerCamelCase_ =self.post_process_function(lowerCAmelCase, lowerCAmelCase, output.predictions ) lowerCamelCase_ =self.compute_metrics(lowerCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'''{metric_key_prefix}_''' ): lowerCamelCase_ =metrics.pop(lowerCAmelCase ) metrics.update(output.metrics ) else: lowerCamelCase_ =output.metrics if self.args.should_log: # Only the main node log the results by default self.log(lowerCAmelCase ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) lowerCamelCase_ =self.callback_handler.on_evaluate(self.args, self.state, self.control, lowerCAmelCase ) return metrics def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase = "test" ): """simple docstring""" lowerCamelCase_ =self.get_test_dataloader(lowerCAmelCase ) # Temporarily disable metric computation, we will do it in the loop here. lowerCamelCase_ =self.compute_metrics lowerCamelCase_ =None lowerCamelCase_ =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop lowerCamelCase_ =time.time() try: lowerCamelCase_ =eval_loop( lowerCAmelCase, description='''Prediction''', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=lowerCAmelCase, metric_key_prefix=lowerCAmelCase, ) finally: lowerCamelCase_ =compute_metrics lowerCamelCase_ =self.args.eval_batch_size * self.args.world_size if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( lowerCAmelCase, lowerCAmelCase, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size ), ) ) if self.post_process_function is None or self.compute_metrics is None: return output lowerCamelCase_ =self.post_process_function(lowerCAmelCase, lowerCAmelCase, output.predictions, '''predict''' ) lowerCamelCase_ =self.compute_metrics(lowerCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'''{metric_key_prefix}_''' ): lowerCamelCase_ =metrics.pop(lowerCAmelCase ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=lowerCAmelCase )
75
'''simple docstring''' from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : Dict = logging.get_logger(__name__) a_ : Any = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[str] ='efficientformer' def __init__( self, lowerCAmelCase = [3, 2, 6, 4], lowerCAmelCase = [48, 96, 224, 448], lowerCAmelCase = [True, True, True, True], lowerCAmelCase = 448, lowerCAmelCase = 32, lowerCAmelCase = 4, lowerCAmelCase = 7, lowerCAmelCase = 5, lowerCAmelCase = 8, lowerCAmelCase = 4, lowerCAmelCase = 0.0, lowerCAmelCase = 16, lowerCAmelCase = 3, lowerCAmelCase = 3, lowerCAmelCase = 3, lowerCAmelCase = 2, lowerCAmelCase = 1, lowerCAmelCase = 0.0, lowerCAmelCase = 1, lowerCAmelCase = True, lowerCAmelCase = True, lowerCAmelCase = 1e-5, lowerCAmelCase = "gelu", lowerCAmelCase = 0.0_2, lowerCAmelCase = 1e-12, lowerCAmelCase = 224, lowerCAmelCase = 1e-05, **lowerCAmelCase, ): """simple docstring""" super().__init__(**lowerCAmelCase ) lowerCamelCase_ =hidden_act lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ =hidden_sizes lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =initializer_range lowerCamelCase_ =layer_norm_eps lowerCamelCase_ =patch_size lowerCamelCase_ =num_channels lowerCamelCase_ =depths lowerCamelCase_ =mlp_expansion_ratio lowerCamelCase_ =downsamples lowerCamelCase_ =dim lowerCamelCase_ =key_dim lowerCamelCase_ =attention_ratio lowerCamelCase_ =resolution lowerCamelCase_ =pool_size lowerCamelCase_ =downsample_patch_size lowerCamelCase_ =downsample_stride lowerCamelCase_ =downsample_pad lowerCamelCase_ =drop_path_rate lowerCamelCase_ =num_metaad_blocks lowerCamelCase_ =distillation lowerCamelCase_ =use_layer_scale lowerCamelCase_ =layer_scale_init_value lowerCamelCase_ =image_size lowerCamelCase_ =batch_norm_eps
75
1
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { """SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""", # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class _SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE :Tuple = """deformable_detr""" __SCREAMING_SNAKE_CASE :int = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self : Tuple , a__ : Any=True , a__ : str=None , a__ : str=3 , a__ : str=300 , a__ : Dict=1024 , a__ : Union[str, Any]=6 , a__ : Tuple=1024 , a__ : str=8 , a__ : Optional[Any]=6 , a__ : Union[str, Any]=1024 , a__ : Optional[Any]=8 , a__ : Optional[int]=0.0 , a__ : int=True , a__ : List[Any]="relu" , a__ : Union[str, Any]=256 , a__ : Dict=0.1 , a__ : Optional[int]=0.0 , a__ : Optional[int]=0.0 , a__ : List[Any]=0.02 , a__ : List[str]=1.0 , a__ : Any=True , a__ : Dict=False , a__ : str="sine" , a__ : Dict="resnet50" , a__ : Union[str, Any]=True , a__ : List[str]=False , a__ : Tuple=4 , a__ : int=4 , a__ : Optional[Any]=4 , a__ : Dict=False , a__ : Optional[int]=300 , a__ : List[str]=False , a__ : Union[str, Any]=1 , a__ : Tuple=5 , a__ : List[str]=2 , a__ : Any=1 , a__ : List[Any]=1 , a__ : Tuple=5 , a__ : Optional[Any]=2 , a__ : Optional[Any]=0.1 , a__ : Optional[int]=0.25 , a__ : int=False , **a__ : Union[str, Any] , ): if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) __magic_name__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __magic_name__ = backbone_config.get('''model_type''' ) __magic_name__ = CONFIG_MAPPING[backbone_model_type] __magic_name__ = config_class.from_dict(__SCREAMING_SNAKE_CASE ) __magic_name__ = use_timm_backbone __magic_name__ = backbone_config __magic_name__ = num_channels __magic_name__ = num_queries __magic_name__ = max_position_embeddings __magic_name__ = d_model __magic_name__ = encoder_ffn_dim __magic_name__ = encoder_layers __magic_name__ = encoder_attention_heads __magic_name__ = decoder_ffn_dim __magic_name__ = decoder_layers __magic_name__ = decoder_attention_heads __magic_name__ = dropout __magic_name__ = attention_dropout __magic_name__ = activation_dropout __magic_name__ = activation_function __magic_name__ = init_std __magic_name__ = init_xavier_std __magic_name__ = encoder_layerdrop __magic_name__ = auxiliary_loss __magic_name__ = position_embedding_type __magic_name__ = backbone __magic_name__ = use_pretrained_backbone __magic_name__ = dilation # deformable attributes __magic_name__ = num_feature_levels __magic_name__ = encoder_n_points __magic_name__ = decoder_n_points __magic_name__ = two_stage __magic_name__ = two_stage_num_proposals __magic_name__ = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher __magic_name__ = class_cost __magic_name__ = bbox_cost __magic_name__ = giou_cost # Loss coefficients __magic_name__ = mask_loss_coefficient __magic_name__ = dice_loss_coefficient __magic_name__ = bbox_loss_coefficient __magic_name__ = giou_loss_coefficient __magic_name__ = eos_coefficient __magic_name__ = focal_alpha __magic_name__ = disable_custom_kernels super().__init__(is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) @property def snake_case__ ( self : Union[str, Any] ): return self.encoder_attention_heads @property def snake_case__ ( self : Union[str, Any] ): return self.d_model def snake_case__ ( self : int ): __magic_name__ = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: __magic_name__ = self.backbone_config.to_dict() __magic_name__ = self.__class__.model_type return output
352
'''simple docstring''' import argparse import shutil from pathlib import Path from tqdm import tqdm from transformers import AutoTokenizer def UpperCamelCase ( a , a , a , a=1024 ) -> Union[str, Any]: '''simple docstring''' __magic_name__ , __magic_name__ = [], [] __magic_name__ = list(zip(a , a ) ) __magic_name__ , __magic_name__ = sorted_examples[0] def is_too_big(a ): return tok(a , return_tensors='''pt''' ).input_ids.shape[1] > max_tokens for src, tgt in tqdm(sorted_examples[1:] ): __magic_name__ = new_src + ''' ''' + src __magic_name__ = new_tgt + ''' ''' + tgt if is_too_big(a ) or is_too_big(a ): # cant fit, finalize example finished_src.append(a ) finished_tgt.append(a ) __magic_name__ , __magic_name__ = src, tgt else: # can fit, keep adding __magic_name__ , __magic_name__ = cand_src, cand_tgt # cleanup if new_src: assert new_tgt finished_src.append(a ) finished_tgt.append(a ) return finished_src, finished_tgt def UpperCamelCase ( a , a , a , a ) -> Any: '''simple docstring''' __magic_name__ = Path(a ) save_path.mkdir(exist_ok=a ) for split in ["train"]: __magic_name__ , __magic_name__ = data_dir / F'''{split}.source''', data_dir / F'''{split}.target''' __magic_name__ = [x.rstrip() for x in Path(a ).open().readlines()] __magic_name__ = [x.rstrip() for x in Path(a ).open().readlines()] __magic_name__ , __magic_name__ = pack_examples(a , a , a , a ) print(F'''packed {split} split from {len(a )} examples -> {len(a )}.''' ) Path(save_path / F'''{split}.source''' ).open('''w''' ).write('''\n'''.join(a ) ) Path(save_path / F'''{split}.target''' ).open('''w''' ).write('''\n'''.join(a ) ) for split in ["val", "test"]: __magic_name__ , __magic_name__ = data_dir / F'''{split}.source''', data_dir / F'''{split}.target''' shutil.copyfile(a , save_path / F'''{split}.source''' ) shutil.copyfile(a , save_path / F'''{split}.target''' ) def UpperCamelCase ( ) -> List[str]: '''simple docstring''' __magic_name__ = argparse.ArgumentParser() parser.add_argument('''--tok_name''' , type=a , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''--max_seq_len''' , type=a , default=128 ) parser.add_argument('''--data_dir''' , type=a ) parser.add_argument('''--save_path''' , type=a ) __magic_name__ = parser.parse_args() __magic_name__ = AutoTokenizer.from_pretrained(args.tok_name ) return pack_data_dir(a , Path(args.data_dir ) , args.max_seq_len , args.save_path ) if __name__ == "__main__": packer_cli()
98
0
from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig _snake_case : str = logging.get_logger(__name__) # General docstring _snake_case : Union[str, Any] = "MobileNetV1Config" # Base docstring _snake_case : List[Any] = "google/mobilenet_v1_1.0_224" _snake_case : Optional[Any] = [1, 1_024, 7, 7] # Image classification docstring _snake_case : Tuple = "google/mobilenet_v1_1.0_224" _snake_case : str = "tabby, tabby cat" _snake_case : Tuple = [ "google/mobilenet_v1_1.0_224", "google/mobilenet_v1_0.75_192", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None ): __snake_case : Union[str, Any] = {} if isinstance(__lowerCamelCase , __lowerCamelCase ): __snake_case : List[str] = model.mobilenet_va else: __snake_case : List[Any] = model __snake_case : Optional[Any] = "MobilenetV1/Conv2d_0/" __snake_case : List[Any] = backbone.conv_stem.convolution.weight __snake_case : Dict = backbone.conv_stem.normalization.bias __snake_case : int = backbone.conv_stem.normalization.weight __snake_case : Union[str, Any] = backbone.conv_stem.normalization.running_mean __snake_case : List[Any] = backbone.conv_stem.normalization.running_var for i in range(1_3 ): __snake_case : int = i + 1 __snake_case : Union[str, Any] = i * 2 __snake_case : Tuple = backbone.layer[pt_index] __snake_case : Optional[Any] = F'MobilenetV1/Conv2d_{tf_index}_depthwise/' __snake_case : Any = pointer.convolution.weight __snake_case : str = pointer.normalization.bias __snake_case : List[Any] = pointer.normalization.weight __snake_case : List[str] = pointer.normalization.running_mean __snake_case : Dict = pointer.normalization.running_var __snake_case : Tuple = backbone.layer[pt_index + 1] __snake_case : Optional[Any] = F'MobilenetV1/Conv2d_{tf_index}_pointwise/' __snake_case : Union[str, Any] = pointer.convolution.weight __snake_case : Optional[Any] = pointer.normalization.bias __snake_case : Union[str, Any] = pointer.normalization.weight __snake_case : List[Any] = pointer.normalization.running_mean __snake_case : Optional[int] = pointer.normalization.running_var if isinstance(__lowerCamelCase , __lowerCamelCase ): __snake_case : Tuple = "MobilenetV1/Logits/Conv2d_1c_1x1/" __snake_case : List[str] = model.classifier.weight __snake_case : List[Any] = model.classifier.bias return tf_to_pt_map def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): try: import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise # Load weights from TF model __snake_case : Any = tf.train.list_variables(__lowerCamelCase ) __snake_case : Optional[int] = {} for name, shape in init_vars: logger.info(F'Loading TF weight {name} with shape {shape}' ) __snake_case : Dict = tf.train.load_variable(__lowerCamelCase , __lowerCamelCase ) __snake_case : List[str] = array # Build TF to PyTorch weights loading map __snake_case : int = _build_tf_to_pytorch_map(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) for name, pointer in tf_to_pt_map.items(): logger.info(F'Importing {name}' ) if name not in tf_weights: logger.info(F'{name} not in tf pre-trained weights, skipping' ) continue __snake_case : str = tf_weights[name] if "depthwise_weights" in name: logger.info("Transposing depthwise" ) __snake_case : Dict = np.transpose(__lowerCamelCase , (2, 3, 0, 1) ) elif "weights" in name: logger.info("Transposing" ) if len(pointer.shape ) == 2: # copying into linear layer __snake_case : Any = array.squeeze().transpose() else: __snake_case : str = np.transpose(__lowerCamelCase , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' ) logger.info(F'Initialize PyTorch weight {name} {array.shape}' ) __snake_case : Union[str, Any] = torch.from_numpy(__lowerCamelCase ) tf_weights.pop(__lowerCamelCase , __lowerCamelCase ) tf_weights.pop(name + "/RMSProp" , __lowerCamelCase ) tf_weights.pop(name + "/RMSProp_1" , __lowerCamelCase ) tf_weights.pop(name + "/ExponentialMovingAverage" , __lowerCamelCase ) logger.info(F'Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}' ) return model def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ): __snake_case , __snake_case : List[Any] = features.shape[-2:] __snake_case , __snake_case : str = conv_layer.stride __snake_case , __snake_case : Tuple = conv_layer.kernel_size if in_height % stride_height == 0: __snake_case : List[Any] = max(kernel_height - stride_height , 0 ) else: __snake_case : List[str] = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: __snake_case : Any = max(kernel_width - stride_width , 0 ) else: __snake_case : Union[str, Any] = max(kernel_width - (in_width % stride_width) , 0 ) __snake_case : Any = pad_along_width // 2 __snake_case : Union[str, Any] = pad_along_width - pad_left __snake_case : str = pad_along_height // 2 __snake_case : Any = pad_along_height - pad_top __snake_case : Union[str, Any] = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(__lowerCamelCase , __lowerCamelCase , "constant" , 0.0 ) class a (nn.Module ): """simple docstring""" def __init__( self : str , lowerCamelCase : MobileNetVaConfig , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : bool = False , lowerCamelCase : Optional[bool] = True , lowerCamelCase : Optional[bool or str] = True , ) -> None: super().__init__() __snake_case : List[str] = config if in_channels % groups != 0: raise ValueError(F'Input channels ({in_channels}) are not divisible by {groups} groups.' ) if out_channels % groups != 0: raise ValueError(F'Output channels ({out_channels}) are not divisible by {groups} groups.' ) __snake_case : Optional[int] = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) __snake_case : int = nn.Convad( in_channels=lowerCamelCase , out_channels=lowerCamelCase , kernel_size=lowerCamelCase , stride=lowerCamelCase , padding=lowerCamelCase , groups=lowerCamelCase , bias=lowerCamelCase , padding_mode="zeros" , ) if use_normalization: __snake_case : List[str] = nn.BatchNormad( num_features=lowerCamelCase , eps=config.layer_norm_eps , momentum=0.99_97 , affine=lowerCamelCase , track_running_stats=lowerCamelCase , ) else: __snake_case : Dict = None if use_activation: if isinstance(lowerCamelCase , lowerCamelCase ): __snake_case : int = ACTaFN[use_activation] elif isinstance(config.hidden_act , lowerCamelCase ): __snake_case : Any = ACTaFN[config.hidden_act] else: __snake_case : Optional[int] = config.hidden_act else: __snake_case : Tuple = None def __snake_case ( self : Dict , lowerCamelCase : torch.Tensor ) -> torch.Tensor: if self.config.tf_padding: __snake_case : Optional[Any] = apply_tf_padding(lowerCamelCase , self.convolution ) __snake_case : List[Any] = self.convolution(lowerCamelCase ) if self.normalization is not None: __snake_case : Tuple = self.normalization(lowerCamelCase ) if self.activation is not None: __snake_case : Dict = self.activation(lowerCamelCase ) return features class a (_lowerCAmelCase ): """simple docstring""" __UpperCAmelCase : Optional[Any] = MobileNetVaConfig __UpperCAmelCase : Optional[int] = load_tf_weights_in_mobilenet_va __UpperCAmelCase : Dict = "mobilenet_v1" __UpperCAmelCase : int = "pixel_values" __UpperCAmelCase : str = False def __snake_case ( self : Dict , lowerCamelCase : Union[nn.Linear, nn.Convad] ) -> None: if isinstance(lowerCamelCase , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(lowerCamelCase , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) _snake_case : str = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" _snake_case : List[Any] = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , _lowerCAmelCase , ) class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : Any , lowerCamelCase : MobileNetVaConfig , lowerCamelCase : bool = True ) -> int: super().__init__(lowerCamelCase ) __snake_case : List[Any] = config __snake_case : int = 32 __snake_case : Union[str, Any] = max(int(depth * config.depth_multiplier ) , config.min_depth ) __snake_case : str = MobileNetVaConvLayer( lowerCamelCase , in_channels=config.num_channels , out_channels=lowerCamelCase , kernel_size=3 , stride=2 , ) __snake_case : str = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] __snake_case : str = nn.ModuleList() for i in range(13 ): __snake_case : Optional[Any] = out_channels if strides[i] == 2 or i == 0: depth *= 2 __snake_case : Union[str, Any] = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , kernel_size=3 , stride=strides[i] , groups=lowerCamelCase , ) ) self.layer.append( MobileNetVaConvLayer( lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , kernel_size=1 , ) ) __snake_case : List[Any] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def __snake_case ( self : Any , lowerCamelCase : Dict ) -> Dict: raise NotImplementedError @add_start_docstrings_to_model_forward(lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __snake_case ( self : Optional[Any] , lowerCamelCase : Optional[torch.Tensor] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[bool] = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: __snake_case : Optional[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __snake_case : int = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values" ) __snake_case : Tuple = self.conv_stem(lowerCamelCase ) __snake_case : str = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): __snake_case : Dict = layer_module(lowerCamelCase ) if output_hidden_states: __snake_case : int = all_hidden_states + (hidden_states,) __snake_case : Any = hidden_states if self.pooler is not None: __snake_case : Optional[int] = torch.flatten(self.pooler(lowerCamelCase ) , start_dim=1 ) else: __snake_case : List[Any] = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowerCamelCase , pooler_output=lowerCamelCase , hidden_states=lowerCamelCase , ) @add_start_docstrings( "\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , _lowerCAmelCase , ) class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase : MobileNetVaConfig ) -> None: super().__init__(lowerCamelCase ) __snake_case : Optional[Any] = config.num_labels __snake_case : Dict = MobileNetVaModel(lowerCamelCase ) __snake_case : Dict = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head __snake_case : Dict = nn.Dropout(config.classifier_dropout_prob , inplace=lowerCamelCase ) __snake_case : Any = nn.Linear(lowerCamelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __snake_case ( self : Any , lowerCamelCase : Optional[torch.Tensor] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[torch.Tensor] = None , lowerCamelCase : Optional[bool] = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: __snake_case : List[str] = return_dict if return_dict is not None else self.config.use_return_dict __snake_case : List[Any] = self.mobilenet_va(lowerCamelCase , output_hidden_states=lowerCamelCase , return_dict=lowerCamelCase ) __snake_case : Any = outputs.pooler_output if return_dict else outputs[1] __snake_case : int = self.classifier(self.dropout(lowerCamelCase ) ) __snake_case : str = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __snake_case : List[str] = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __snake_case : Union[str, Any] = "single_label_classification" else: __snake_case : Union[str, Any] = "multi_label_classification" if self.config.problem_type == "regression": __snake_case : Optional[int] = MSELoss() if self.num_labels == 1: __snake_case : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() ) else: __snake_case : str = loss_fct(lowerCamelCase , lowerCamelCase ) elif self.config.problem_type == "single_label_classification": __snake_case : str = CrossEntropyLoss() __snake_case : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __snake_case : Union[str, Any] = BCEWithLogitsLoss() __snake_case : List[str] = loss_fct(lowerCamelCase , lowerCamelCase ) if not return_dict: __snake_case : Dict = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=lowerCamelCase , logits=lowerCamelCase , hidden_states=outputs.hidden_states , )
123
from importlib import import_module from .logging import get_logger _snake_case : Optional[int] = get_logger(__name__) class a : """simple docstring""" def __init__( self : List[str] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[str]=None ) -> Any: __snake_case : Dict = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith("__" ): setattr(self , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase ) ) __snake_case : int = module._original_module if isinstance(lowerCamelCase , _PatchedModuleObj ) else module class a : """simple docstring""" __UpperCAmelCase : List[Any] = [] def __init__( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : str , lowerCamelCase : Dict , lowerCamelCase : Optional[Any]=None ) -> List[Any]: __snake_case : Union[str, Any] = obj __snake_case : Dict = target __snake_case : Any = new __snake_case : List[str] = target.split("." )[0] __snake_case : Union[str, Any] = {} __snake_case : int = attrs or [] def __enter__( self : List[Any] ) -> Tuple: *__snake_case , __snake_case : int = self.target.split("." ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(lowerCamelCase ) ): try: __snake_case : Any = import_module(".".join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): __snake_case : Union[str, Any] = getattr(self.obj , lowerCamelCase ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(lowerCamelCase , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): __snake_case : List[Any] = obj_attr # patch at top level setattr(self.obj , lowerCamelCase , _PatchedModuleObj(lowerCamelCase , attrs=self.attrs ) ) __snake_case : Optional[int] = getattr(self.obj , lowerCamelCase ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(lowerCamelCase , lowerCamelCase , _PatchedModuleObj(getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase ) , attrs=self.attrs ) ) __snake_case : List[Any] = getattr(lowerCamelCase , lowerCamelCase ) # finally set the target attribute setattr(lowerCamelCase , lowerCamelCase , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: __snake_case : Union[str, Any] = getattr(import_module(".".join(lowerCamelCase ) ) , lowerCamelCase ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". if getattr(self.obj , lowerCamelCase ) is attr_value: __snake_case : Tuple = getattr(self.obj , lowerCamelCase ) setattr(self.obj , lowerCamelCase , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" __snake_case : Dict = globals()["__builtins__"][target_attr] setattr(self.obj , lowerCamelCase , self.new ) else: raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' ) def __exit__( self : Any , *lowerCamelCase : Any ) -> Optional[int]: for attr in list(self.original ): setattr(self.obj , lowerCamelCase , self.original.pop(lowerCamelCase ) ) def __snake_case ( self : Optional[Any] ) -> Optional[int]: self.__enter__() self._active_patches.append(self ) def __snake_case ( self : Any ) -> List[str]: try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
123
1
'''simple docstring''' from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ): @register_to_config def __init__( self : Optional[int] , lowerCamelCase__ : int = 7_68 , ) ->List[Any]: '''simple docstring''' super().__init__() _UpperCAmelCase : List[Any] = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) ) _UpperCAmelCase : Any = nn.Parameter(torch.ones(1 , lowerCamelCase__ ) ) def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Optional[Union[str, torch.device]] = None , lowerCamelCase__ : Optional[torch.dtype] = None , ) ->str: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = nn.Parameter(self.mean.to(lowerCamelCase__ ).to(lowerCamelCase__ ) ) _UpperCAmelCase : str = nn.Parameter(self.std.to(lowerCamelCase__ ).to(lowerCamelCase__ ) ) return self def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : List[str] ) ->Any: '''simple docstring''' _UpperCAmelCase : str = (embeds - self.mean) * 1.0 / self.std return embeds def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Any ) ->Dict: '''simple docstring''' _UpperCAmelCase : Any = (embeds * self.std) + self.mean return embeds
364
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { 'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json', } class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ): lowerCAmelCase : int = "resnet" lowerCAmelCase : Union[str, Any] = ["basic", "bottleneck"] def __init__( self : Dict , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Any=64 , lowerCamelCase__ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCamelCase__ : int=[3, 4, 6, 3] , lowerCamelCase__ : Dict="bottleneck" , lowerCamelCase__ : Dict="relu" , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : Any=None , lowerCamelCase__ : int=None , **lowerCamelCase__ : Tuple , ) ->List[str]: '''simple docstring''' super().__init__(**lowerCamelCase__ ) if layer_type not in self.layer_types: raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" ) _UpperCAmelCase : str = num_channels _UpperCAmelCase : List[str] = embedding_size _UpperCAmelCase : Tuple = hidden_sizes _UpperCAmelCase : Dict = depths _UpperCAmelCase : List[Any] = layer_type _UpperCAmelCase : Optional[int] = hidden_act _UpperCAmelCase : Tuple = downsample_in_first_stage _UpperCAmelCase : str = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(lowerCamelCase__ ) + 1 )] _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = get_aligned_output_features_output_indices( out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names ) class lowerCAmelCase__ ( UpperCAmelCase__ ): lowerCAmelCase : Optional[Any] = version.parse("1.11" ) @property def lowerCAmelCase__ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def lowerCAmelCase__ ( self : str ) ->float: '''simple docstring''' return 1E-3
322
0
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { """Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""", # See all DPT models at https://huggingface.co/models?filter=dpt } class UpperCAmelCase_ ( _lowercase): snake_case__ = '''dpt''' def __init__( self : Tuple , __UpperCamelCase : str=768 , __UpperCamelCase : List[str]=12 , __UpperCamelCase : Dict=12 , __UpperCamelCase : Any=3072 , __UpperCamelCase : Dict="gelu" , __UpperCamelCase : str=0.0 , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : Union[str, Any]=0.0_2 , __UpperCamelCase : Dict=1E-12 , __UpperCamelCase : Any=384 , __UpperCamelCase : str=16 , __UpperCamelCase : Optional[Any]=3 , __UpperCamelCase : int=False , __UpperCamelCase : str=True , __UpperCamelCase : Optional[int]=[2, 5, 8, 11] , __UpperCamelCase : Any="project" , __UpperCamelCase : Any=[4, 2, 1, 0.5] , __UpperCamelCase : Tuple=[96, 192, 384, 768] , __UpperCamelCase : Dict=256 , __UpperCamelCase : List[Any]=-1 , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : Tuple=True , __UpperCamelCase : Any=0.4 , __UpperCamelCase : List[Any]=255 , __UpperCamelCase : Union[str, Any]=0.1 , __UpperCamelCase : Union[str, Any]=[1, 1024, 24, 24] , __UpperCamelCase : List[Any]=[0, 1] , __UpperCamelCase : List[str]=None , **__UpperCamelCase : Optional[Any] , ) -> List[str]: super().__init__(**__UpperCamelCase ) _UpperCamelCase = hidden_size _UpperCamelCase = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info('''Initializing the config with a `BiT` backbone.''' ) _UpperCamelCase = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, } _UpperCamelCase = BitConfig(**__UpperCamelCase ) elif isinstance(__UpperCamelCase , __UpperCamelCase ): logger.info('''Initializing the config with a `BiT` backbone.''' ) _UpperCamelCase = BitConfig(**__UpperCamelCase ) elif isinstance(__UpperCamelCase , __UpperCamelCase ): _UpperCamelCase = backbone_config else: raise ValueError( F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' ) _UpperCamelCase = backbone_featmap_shape _UpperCamelCase = neck_ignore_stages if readout_type != "project": raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' ) else: _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = [] _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = initializer_range _UpperCamelCase = layer_norm_eps _UpperCamelCase = image_size _UpperCamelCase = patch_size _UpperCamelCase = num_channels _UpperCamelCase = qkv_bias _UpperCamelCase = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' ) _UpperCamelCase = readout_type _UpperCamelCase = reassemble_factors _UpperCamelCase = neck_hidden_sizes _UpperCamelCase = fusion_hidden_size _UpperCamelCase = head_in_index _UpperCamelCase = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) _UpperCamelCase = use_auxiliary_head _UpperCamelCase = auxiliary_loss_weight _UpperCamelCase = semantic_loss_ignore_index _UpperCamelCase = semantic_classifier_dropout def _UpperCamelCase ( self : Dict ) -> Tuple: _UpperCamelCase = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: _UpperCamelCase = self.backbone_config.to_dict() _UpperCamelCase = self.__class__.model_type return output
256
"""simple docstring""" import logging from transformers import PretrainedConfig UpperCAmelCase = logging.getLogger(__name__) UpperCAmelCase = { """bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""", } class UpperCAmelCase_ ( _lowercase): snake_case__ = '''bertabs''' def __init__( self : Optional[Any] , __UpperCamelCase : List[Any]=3_0522 , __UpperCamelCase : Any=512 , __UpperCamelCase : int=6 , __UpperCamelCase : Optional[Any]=512 , __UpperCamelCase : Any=8 , __UpperCamelCase : int=512 , __UpperCamelCase : str=0.2 , __UpperCamelCase : List[str]=6 , __UpperCamelCase : Optional[Any]=768 , __UpperCamelCase : Union[str, Any]=8 , __UpperCamelCase : Optional[Any]=2048 , __UpperCamelCase : str=0.2 , **__UpperCamelCase : List[Any] , ) -> Union[str, Any]: super().__init__(**__UpperCamelCase ) _UpperCamelCase = vocab_size _UpperCamelCase = max_pos _UpperCamelCase = enc_layers _UpperCamelCase = enc_hidden_size _UpperCamelCase = enc_heads _UpperCamelCase = enc_ff_size _UpperCamelCase = enc_dropout _UpperCamelCase = dec_layers _UpperCamelCase = dec_hidden_size _UpperCamelCase = dec_heads _UpperCamelCase = dec_ff_size _UpperCamelCase = dec_dropout
256
1
import pickle import numpy as np from matplotlib import pyplot as plt class a__ : def __init__( self , A , A , A , A , A , A=0.2 , A=0.2 ) -> Any: '''simple docstring''' a = bp_numa a = bp_numa a = bp_numa a = conva_get[:2] a = conva_get[2] a = size_pa a = rate_w a = rate_t a = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] a = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) a = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) a = -2 * np.random.rand(self.conva[1] ) + 1 a = -2 * np.random.rand(self.num_bpa ) + 1 a = -2 * np.random.rand(self.num_bpa ) + 1 def lowerCAmelCase_ ( self , A ) -> Optional[Any]: '''simple docstring''' a = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(A , "wb" ) as f: pickle.dump(A , A ) print(F'''Model saved: {save_path}''' ) @classmethod def lowerCAmelCase_ ( cls , A ) -> Optional[Any]: '''simple docstring''' with open(A , "rb" ) as f: a = pickle.load(A ) # noqa: S301 a = model_dic.get("conv1" ) conv_get.append(model_dic.get("step_conv1" ) ) a = model_dic.get("size_pooling1" ) a = model_dic.get("num_bp1" ) a = model_dic.get("num_bp2" ) a = model_dic.get("num_bp3" ) a = model_dic.get("rate_weight" ) a = model_dic.get("rate_thre" ) # create model instance a = CNN(A , A , A , A , A , A , A ) # modify model parameter a = model_dic.get("w_conv1" ) a = model_dic.get("wkj" ) a = model_dic.get("vji" ) a = model_dic.get("thre_conv1" ) a = model_dic.get("thre_bp2" ) a = model_dic.get("thre_bp3" ) return conv_ins def lowerCAmelCase_ ( self , A ) -> Optional[Any]: '''simple docstring''' return 1 / (1 + np.exp(-1 * x )) def lowerCAmelCase_ ( self , A ) -> Optional[Any]: '''simple docstring''' return round(A , 3 ) def lowerCAmelCase_ ( self , A , A , A , A , A ) -> str: '''simple docstring''' a = convs[0] a = convs[1] a = np.shape(A )[0] # get the data slice of original image data, data_focus a = [] for i_focus in range(0 , size_data - size_conv + 1 , A ): for j_focus in range(0 , size_data - size_conv + 1 , A ): a = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(A ) # calculate the feature map of every single kernel, and saved as list of matrix a = [] a = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(A ): a = [] for i_focus in range(len(A ) ): a = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(A ) ) a = np.asmatrix(A ).reshape( A , A ) data_featuremap.append(A ) # expanding the data slice to One dimenssion a = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(A ) ) a = np.asarray(A ) return focus_list, data_featuremap def lowerCAmelCase_ ( self , A , A , A="average_pool" ) -> Tuple: '''simple docstring''' a = len(featuremaps[0] ) a = int(size_map / size_pooling ) a = [] for i_map in range(len(A ) ): a = featuremaps[i_map] a = [] for i_focus in range(0 , A , A ): for j_focus in range(0 , A , A ): a = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(A ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(A ) ) a = np.asmatrix(A ).reshape(A , A ) featuremap_pooled.append(A ) return featuremap_pooled def lowerCAmelCase_ ( self , A ) -> List[Any]: '''simple docstring''' a = [] for i in range(len(A ) ): a = np.shape(data[i] ) a = data[i].reshape(1 , shapes[0] * shapes[1] ) a = data_listed.getA().tolist()[0] data_expanded.extend(A ) a = np.asarray(A ) return data_expanded def lowerCAmelCase_ ( self , A ) -> Optional[int]: '''simple docstring''' a = np.asarray(A ) a = np.shape(A ) a = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def lowerCAmelCase_ ( self , A , A , A , A , A ) -> Tuple: '''simple docstring''' a = [] a = 0 for i_map in range(A ): a = np.ones((size_map, size_map) ) for i in range(0 , A , A ): for j in range(0 , A , A ): a = pd_pool[ i_pool ] a = i_pool + 1 a = np.multiply( A , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(A ) return pd_all def lowerCAmelCase_ ( self , A , A , A , A , A , A=bool ) -> str: '''simple docstring''' print("----------------------Start Training-------------------------" ) print((" - - Shape: Train_Data ", np.shape(A )) ) print((" - - Shape: Teach_Data ", np.shape(A )) ) a = 0 a = [] a = 10000 while rp < n_repeat and mse >= error_accuracy: a = 0 print(F'''-------------Learning Time {rp}--------------''' ) for p in range(len(A ) ): # print('------------Learning Image: %d--------------'%p) a = np.asmatrix(datas_train[p] ) a = np.asarray(datas_teach[p] ) a , a = self.convolute( A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) a = self.pooling(A , self.size_poolinga ) a = np.shape(A ) a = self._expand(A ) a = data_bp_input a = np.dot(A , self.vji.T ) - self.thre_bpa a = self.sig(A ) a = np.dot(A , self.wkj.T ) - self.thre_bpa a = self.sig(A ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- a = np.multiply( (data_teach - bp_outa) , np.multiply(A , (1 - bp_outa) ) ) a = np.multiply( np.dot(A , self.wkj ) , np.multiply(A , (1 - bp_outa) ) ) a = np.dot(A , self.vji ) a = pd_i_all / (self.size_poolinga * self.size_poolinga) a = pd_conva_pooled.T.getA().tolist() a = self._calculate_gradient_from_pool( A , A , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): a = self._expand_mat(pd_conva_all[k_conv] ) a = self.rate_weight * np.dot(A , A ) a = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) a = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer a = self.wkj + pd_k_all.T * bp_outa * self.rate_weight a = self.vji + pd_j_all.T * bp_outa * self.rate_weight a = self.thre_bpa - pd_k_all * self.rate_thre a = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image a = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) a = rp + 1 a = error_count / patterns all_mse.append(A ) def draw_error(): a = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(A , "+-" ) plt.plot(A , "r--" ) plt.xlabel("Learning Times" ) plt.ylabel("All_mse" ) plt.grid(A , alpha=0.5 ) plt.show() print("------------------Training Complished---------------------" ) print((" - - Training epoch: ", rp, F''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def lowerCAmelCase_ ( self , A ) -> int: '''simple docstring''' a = [] print("-------------------Start Testing-------------------------" ) print((" - - Shape: Test_Data ", np.shape(A )) ) for p in range(len(A ) ): a = np.asmatrix(datas_test[p] ) a , a = self.convolute( A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) a = self.pooling(A , self.size_poolinga ) a = self._expand(A ) a = data_bp_input a = bp_outa * self.vji.T - self.thre_bpa a = self.sig(A ) a = bp_outa * self.wkj.T - self.thre_bpa a = self.sig(A ) produce_out.extend(bp_outa.getA().tolist() ) a = [list(map(self.do_round , A ) ) for each in produce_out] return np.asarray(A ) def lowerCAmelCase_ ( self , A ) -> str: '''simple docstring''' a = np.asmatrix(A ) a , a = self.convolute( A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) a = self.pooling(A , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
363
lowercase__ : str = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" lowercase__ : Any = [{"type": "code", "content": INSTALL_CONTENT}] lowercase__ : Any = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
180
0
"""simple docstring""" import math class lowerCamelCase : '''simple docstring''' def __init__( self: List[Any] , snake_case: int=0 ) -> int: # a graph with Node 0,1,...,N-1 snake_case_ :List[str] = n snake_case_ :int = [ [math.inf for j in range(0 , snake_case )] for i in range(0 , snake_case ) ] # adjacency matrix for weight snake_case_ :str = [ [math.inf for j in range(0 , snake_case )] for i in range(0 , snake_case ) ] # dp[i][j] stores minimum distance from i to j def lowerCAmelCase_ ( self: Optional[int] , snake_case: str , snake_case: Optional[Any] , snake_case: str ) -> Tuple: snake_case_ :List[Any] = w def lowerCAmelCase_ ( self: List[str] ) -> str: for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): snake_case_ :Any = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def lowerCAmelCase_ ( self: int , snake_case: List[Any] , snake_case: Optional[Any] ) -> Union[str, Any]: return self.dp[u][v] if __name__ == "__main__": __a = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
66
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _A = logging.get_logger(__name__) _A = { '''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''', # See all ViT models at https://huggingface.co/models?filter=vit } class A ( __UpperCAmelCase ): __snake_case = 'vit' def __init__( self, UpperCamelCase__=768, UpperCamelCase__=12, UpperCamelCase__=12, UpperCamelCase__=3072, UpperCamelCase__="gelu", UpperCamelCase__=0.0, UpperCamelCase__=0.0, UpperCamelCase__=0.02, UpperCamelCase__=1E-12, UpperCamelCase__=224, UpperCamelCase__=16, UpperCamelCase__=3, UpperCamelCase__=True, UpperCamelCase__=16, **UpperCamelCase__, ): """simple docstring""" super().__init__(**UpperCamelCase__ ) lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_act lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = image_size lowerCAmelCase_ = patch_size lowerCAmelCase_ = num_channels lowerCAmelCase_ = qkv_bias lowerCAmelCase_ = encoder_stride class A ( __UpperCAmelCase ): __snake_case = version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return 1E-4
278
0
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() A_ = logging.get_logger(__name__) def _lowerCAmelCase ( UpperCAmelCase__ : List[Any] ) ->List[str]: A__ : Union[str, Any] = DPTConfig() if "large" in checkpoint_url: A__ : int = 1_0_2_4 A__ : Union[str, Any] = 4_0_9_6 A__ : Optional[int] = 2_4 A__ : int = 1_6 A__ : Union[str, Any] = [5, 1_1, 1_7, 2_3] A__ : Tuple = [2_5_6, 5_1_2, 1_0_2_4, 1_0_2_4] A__ : Tuple = (1, 3_8_4, 3_8_4) if "ade" in checkpoint_url: A__ : Optional[int] = True A__ : int = 1_5_0 A__ : Union[str, Any] = """huggingface/label-files""" A__ : List[Any] = """ade20k-id2label.json""" A__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(UpperCAmelCase__, UpperCAmelCase__, repo_type="""dataset""" ) ), """r""" ) ) A__ : List[Any] = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()} A__ : Dict = idalabel A__ : List[Any] = {v: k for k, v in idalabel.items()} A__ : Optional[Any] = [1, 1_5_0, 4_8_0, 4_8_0] return config, expected_shape def _lowerCAmelCase ( UpperCAmelCase__ : int ) ->Any: A__ : List[Any] = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(UpperCAmelCase__, UpperCAmelCase__ ) def _lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any] ) ->List[str]: if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): A__ : str = name.replace("""pretrained.model""", """dpt.encoder""" ) if "pretrained.model" in name: A__ : Dict = name.replace("""pretrained.model""", """dpt.embeddings""" ) if "patch_embed" in name: A__ : List[Any] = name.replace("""patch_embed""", """patch_embeddings""" ) if "pos_embed" in name: A__ : int = name.replace("""pos_embed""", """position_embeddings""" ) if "attn.proj" in name: A__ : Tuple = name.replace("""attn.proj""", """attention.output.dense""" ) if "proj" in name and "project" not in name: A__ : List[Any] = name.replace("""proj""", """projection""" ) if "blocks" in name: A__ : Optional[Any] = name.replace("""blocks""", """layer""" ) if "mlp.fc1" in name: A__ : int = name.replace("""mlp.fc1""", """intermediate.dense""" ) if "mlp.fc2" in name: A__ : List[str] = name.replace("""mlp.fc2""", """output.dense""" ) if "norm1" in name: A__ : Any = name.replace("""norm1""", """layernorm_before""" ) if "norm2" in name: A__ : List[str] = name.replace("""norm2""", """layernorm_after""" ) if "scratch.output_conv" in name: A__ : Optional[int] = name.replace("""scratch.output_conv""", """head""" ) if "scratch" in name: A__ : List[str] = name.replace("""scratch""", """neck""" ) if "layer1_rn" in name: A__ : List[str] = name.replace("""layer1_rn""", """convs.0""" ) if "layer2_rn" in name: A__ : Optional[int] = name.replace("""layer2_rn""", """convs.1""" ) if "layer3_rn" in name: A__ : Any = name.replace("""layer3_rn""", """convs.2""" ) if "layer4_rn" in name: A__ : Any = name.replace("""layer4_rn""", """convs.3""" ) if "refinenet" in name: A__ : Union[str, Any] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 A__ : str = name.replace(f'refinenet{layer_idx}', f'fusion_stage.layers.{abs(layer_idx-4 )}' ) if "out_conv" in name: A__ : Optional[Any] = name.replace("""out_conv""", """projection""" ) if "resConfUnit1" in name: A__ : List[Any] = name.replace("""resConfUnit1""", """residual_layer1""" ) if "resConfUnit2" in name: A__ : Tuple = name.replace("""resConfUnit2""", """residual_layer2""" ) if "conv1" in name: A__ : Tuple = name.replace("""conv1""", """convolution1""" ) if "conv2" in name: A__ : List[Any] = name.replace("""conv2""", """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: A__ : Union[str, Any] = name.replace("""pretrained.act_postprocess1.0.project.0""", """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: A__ : Tuple = name.replace("""pretrained.act_postprocess2.0.project.0""", """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: A__ : Optional[Any] = name.replace("""pretrained.act_postprocess3.0.project.0""", """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: A__ : Optional[Any] = name.replace("""pretrained.act_postprocess4.0.project.0""", """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: A__ : Any = name.replace("""pretrained.act_postprocess1.3""", """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: A__ : List[Any] = name.replace("""pretrained.act_postprocess1.4""", """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: A__ : Dict = name.replace("""pretrained.act_postprocess2.3""", """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: A__ : Optional[Any] = name.replace("""pretrained.act_postprocess2.4""", """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: A__ : Union[str, Any] = name.replace("""pretrained.act_postprocess3.3""", """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: A__ : Optional[int] = name.replace("""pretrained.act_postprocess4.3""", """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: A__ : Dict = name.replace("""pretrained.act_postprocess4.4""", """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: A__ : Union[str, Any] = name.replace("""pretrained""", """dpt""" ) if "bn" in name: A__ : Union[str, Any] = name.replace("""bn""", """batch_norm""" ) if "head" in name: A__ : Dict = name.replace("""head""", """head.head""" ) if "encoder.norm" in name: A__ : Optional[int] = name.replace("""encoder.norm""", """layernorm""" ) if "auxlayer" in name: A__ : List[str] = name.replace("""auxlayer""", """auxiliary_head.head""" ) return name def _lowerCAmelCase ( UpperCAmelCase__ : int, UpperCAmelCase__ : Dict ) ->str: for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A__ : Any = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' ) A__ : Tuple = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict A__ : List[str] = in_proj_weight[: config.hidden_size, :] A__ : int = in_proj_bias[: config.hidden_size] A__ : Tuple = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A__ : Any = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A__ : str = in_proj_weight[ -config.hidden_size :, : ] A__ : Optional[Any] = in_proj_bias[-config.hidden_size :] def _lowerCAmelCase ( ) ->List[str]: A__ : int = """http://images.cocodataset.org/val2017/000000039769.jpg""" A__ : int = Image.open(requests.get(UpperCAmelCase__, stream=UpperCAmelCase__ ).raw ) return im @torch.no_grad() def _lowerCAmelCase ( UpperCAmelCase__ : int, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : str, UpperCAmelCase__ : int ) ->str: A__ , A__ : Dict = get_dpt_config(UpperCAmelCase__ ) # load original state_dict from URL A__ : Any = torch.hub.load_state_dict_from_url(UpperCAmelCase__, map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(UpperCAmelCase__ ) # rename keys for key in state_dict.copy().keys(): A__ : int = state_dict.pop(UpperCAmelCase__ ) A__ : str = val # read in qkv matrices read_in_q_k_v(UpperCAmelCase__, UpperCAmelCase__ ) # load HuggingFace model A__ : Optional[Any] = DPTForSemanticSegmentation(UpperCAmelCase__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(UpperCAmelCase__ ) model.load_state_dict(UpperCAmelCase__ ) model.eval() # Check outputs on an image A__ : Optional[Any] = 4_8_0 if """ade""" in checkpoint_url else 3_8_4 A__ : Dict = DPTImageProcessor(size=UpperCAmelCase__ ) A__ : Optional[int] = prepare_img() A__ : Any = image_processor(UpperCAmelCase__, return_tensors="""pt""" ) # forward pass A__ : List[str] = model(**UpperCAmelCase__ ).logits if """ade""" in checkpoint_url else model(**UpperCAmelCase__ ).predicted_depth # Assert logits A__ : Optional[Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] ) if "ade" in checkpoint_url: A__ : Optional[int] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] ) assert outputs.shape == torch.Size(UpperCAmelCase__ ) assert ( torch.allclose(outputs[0, 0, :3, :3], UpperCAmelCase__, atol=1e-4 ) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3], UpperCAmelCase__ ) ) Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(UpperCAmelCase__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(UpperCAmelCase__ ) if push_to_hub: print("""Pushing model to hub...""" ) model.push_to_hub( repo_path_or_name=Path(UpperCAmelCase__, UpperCAmelCase__ ), organization="""nielsr""", commit_message="""Add model""", use_temp_dir=UpperCAmelCase__, ) image_processor.push_to_hub( repo_path_or_name=Path(UpperCAmelCase__, UpperCAmelCase__ ), organization="""nielsr""", commit_message="""Add image processor""", use_temp_dir=UpperCAmelCase__, ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''', type=str, help='''URL of the original DPT checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', ) parser.add_argument( '''--model_name''', default='''dpt-large''', type=str, help='''Name of the model, in case you\'re pushing to the hub.''', ) A_ = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
296
"""simple docstring""" from collections import defaultdict from math import gcd def _lowerCAmelCase ( UpperCAmelCase__ : int = 1_5_0_0_0_0_0 ) ->int: A__ : defaultdict = defaultdict(UpperCAmelCase__ ) A__ : Any = 2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1, UpperCAmelCase__, 2 ): if gcd(UpperCAmelCase__, UpperCAmelCase__ ) > 1: continue A__ : str = 2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(UpperCAmelCase__, limit + 1, UpperCAmelCase__ ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(F'{solution() = }')
296
1
"""simple docstring""" import numpy as np def SCREAMING_SNAKE_CASE ( _lowerCamelCase : np.array ) -> np.array: return 1 / (1 + np.exp(-vector )) if __name__ == "__main__": import doctest doctest.testmod()
44
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase__ : str = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : List[str] = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys lowerCAmelCase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
98
0
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { """microsoft/unispeech-large-1500h-cv""": ( """https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json""" ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class lowercase__ ( _UpperCAmelCase ): A__ : Optional[int] ="""unispeech""" def __init__( self : int , UpperCAmelCase_ : Dict=32 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : Dict=12 , UpperCAmelCase_ : Optional[Any]=3072 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : int=1e-5 , UpperCAmelCase_ : List[Any]="group" , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : int=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase_ : List[str]=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : Optional[int]=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[str]=128 , UpperCAmelCase_ : str=16 , UpperCAmelCase_ : str=False , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : List[Any]=0.05 , UpperCAmelCase_ : Tuple=10 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Optional[int]=10 , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : Tuple=320 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Dict=100 , UpperCAmelCase_ : str=256 , UpperCAmelCase_ : Union[str, Any]=256 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Optional[Any]="mean" , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Optional[Any]=80 , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : Optional[int]=0.5 , **UpperCAmelCase_ : Dict , ): super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = hidden_size SCREAMING_SNAKE_CASE__ = feat_extract_norm SCREAMING_SNAKE_CASE__ = feat_extract_activation SCREAMING_SNAKE_CASE__ = list(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = list(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = list(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = conv_bias SCREAMING_SNAKE_CASE__ = num_conv_pos_embeddings SCREAMING_SNAKE_CASE__ = num_conv_pos_embedding_groups SCREAMING_SNAKE_CASE__ = len(self.conv_dim ) SCREAMING_SNAKE_CASE__ = num_hidden_layers SCREAMING_SNAKE_CASE__ = intermediate_size SCREAMING_SNAKE_CASE__ = hidden_act SCREAMING_SNAKE_CASE__ = num_attention_heads SCREAMING_SNAKE_CASE__ = hidden_dropout SCREAMING_SNAKE_CASE__ = attention_dropout SCREAMING_SNAKE_CASE__ = activation_dropout SCREAMING_SNAKE_CASE__ = feat_proj_dropout SCREAMING_SNAKE_CASE__ = final_dropout SCREAMING_SNAKE_CASE__ = layerdrop SCREAMING_SNAKE_CASE__ = layer_norm_eps SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = num_ctc_classes SCREAMING_SNAKE_CASE__ = vocab_size SCREAMING_SNAKE_CASE__ = do_stable_layer_norm SCREAMING_SNAKE_CASE__ = use_weighted_layer_sum SCREAMING_SNAKE_CASE__ = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 SCREAMING_SNAKE_CASE__ = apply_spec_augment SCREAMING_SNAKE_CASE__ = mask_time_prob SCREAMING_SNAKE_CASE__ = mask_time_length SCREAMING_SNAKE_CASE__ = mask_time_min_masks SCREAMING_SNAKE_CASE__ = mask_feature_prob SCREAMING_SNAKE_CASE__ = mask_feature_length SCREAMING_SNAKE_CASE__ = mask_feature_min_masks # parameters for pretraining with codevector quantized representations SCREAMING_SNAKE_CASE__ = num_codevectors_per_group SCREAMING_SNAKE_CASE__ = num_codevector_groups SCREAMING_SNAKE_CASE__ = contrastive_logits_temperature SCREAMING_SNAKE_CASE__ = feat_quantizer_dropout SCREAMING_SNAKE_CASE__ = num_negatives SCREAMING_SNAKE_CASE__ = codevector_dim SCREAMING_SNAKE_CASE__ = proj_codevector_dim SCREAMING_SNAKE_CASE__ = diversity_loss_weight # ctc loss SCREAMING_SNAKE_CASE__ = ctc_loss_reduction SCREAMING_SNAKE_CASE__ = ctc_zero_infinity # pretraining loss SCREAMING_SNAKE_CASE__ = replace_prob @property def A_ ( self : List[Any] ): return functools.reduce(operator.mul , self.conv_stride , 1 )
371
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __snake_case = { """configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ """NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""", """NezhaForNextSentencePrediction""", """NezhaForMaskedLM""", """NezhaForPreTraining""", """NezhaForMultipleChoice""", """NezhaForQuestionAnswering""", """NezhaForSequenceClassification""", """NezhaForTokenClassification""", """NezhaModel""", """NezhaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
169
0
import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) _lowerCAmelCase : Union[str, Any] = logging.getLogger(__name__) _lowerCAmelCase : Any = {"facebook/bart-base": BartForConditionalGeneration} _lowerCAmelCase : List[str] = {"facebook/bart-base": BartTokenizer} def lowerCAmelCase ( ): """simple docstring""" UpperCAmelCase__ = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." ) parser.add_argument( "--validation_file" , type=_lowerCAmelCase , default=_lowerCAmelCase , help="A csv or a json file containing the validation data." ) parser.add_argument( "--max_length" , type=_lowerCAmelCase , default=5 , help="The maximum total input sequence length after tokenization." , ) parser.add_argument( "--num_beams" , type=_lowerCAmelCase , default=_lowerCAmelCase , help=( "Number of beams to use for evaluation. This argument will be " "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``." ) , ) parser.add_argument( "--model_name_or_path" , type=_lowerCAmelCase , help="Path to pretrained model or model identifier from huggingface.co/models." , required=_lowerCAmelCase , ) parser.add_argument( "--config_name" , type=_lowerCAmelCase , default=_lowerCAmelCase , help="Pretrained config name or path if not the same as model_name" , ) parser.add_argument( "--device" , type=_lowerCAmelCase , default="cpu" , help="Device where the model will be run" , ) parser.add_argument("--output_file_path" , type=_lowerCAmelCase , default=_lowerCAmelCase , help="Where to store the final ONNX file." ) UpperCAmelCase__ = parser.parse_args() return args def lowerCAmelCase ( _lowerCAmelCase : Any , _lowerCAmelCase : List[Any]="cpu" ): """simple docstring""" UpperCAmelCase__ = model_dict[model_name].from_pretrained(_lowerCAmelCase ).to(_lowerCAmelCase ) UpperCAmelCase__ = tokenizer_dict[model_name].from_pretrained(_lowerCAmelCase ) if model_name in ["facebook/bart-base"]: UpperCAmelCase__ = 0 UpperCAmelCase__ = None UpperCAmelCase__ = 0 return huggingface_model, tokenizer def lowerCAmelCase ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ): """simple docstring""" model.eval() UpperCAmelCase__ = None UpperCAmelCase__ = torch.jit.script(BARTBeamSearchGenerator(_lowerCAmelCase ) ) with torch.no_grad(): UpperCAmelCase__ = 'My friends are cool but they eat too many carbs.' UpperCAmelCase__ = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="pt" ).to(model.device ) UpperCAmelCase__ = model.generate( inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=_lowerCAmelCase , max_length=_lowerCAmelCase , early_stopping=_lowerCAmelCase , decoder_start_token_id=model.config.decoder_start_token_id , ) torch.onnx.export( _lowerCAmelCase , ( inputs["input_ids"], inputs["attention_mask"], num_beams, max_length, model.config.decoder_start_token_id, ) , _lowerCAmelCase , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={ "input_ids": {0: "batch", 1: "seq"}, "output_ids": {0: "batch", 1: "seq_out"}, } , example_outputs=_lowerCAmelCase , ) logger.info("Model exported to {}".format(_lowerCAmelCase ) ) UpperCAmelCase__ = remove_dup_initializers(os.path.abspath(_lowerCAmelCase ) ) logger.info("Deduplicated and optimized model written to {}".format(_lowerCAmelCase ) ) UpperCAmelCase__ = onnxruntime.InferenceSession(_lowerCAmelCase ) UpperCAmelCase__ = ort_sess.run( _lowerCAmelCase , { "input_ids": inputs["input_ids"].cpu().numpy(), "attention_mask": inputs["attention_mask"].cpu().numpy(), "num_beams": np.array(_lowerCAmelCase ), "max_length": np.array(_lowerCAmelCase ), "decoder_start_token_id": np.array(model.config.decoder_start_token_id ), } , ) np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 ) logger.info("Model outputs from torch and ONNX Runtime are similar." ) logger.info("Success." ) def lowerCAmelCase ( ): """simple docstring""" UpperCAmelCase__ = parse_args() UpperCAmelCase__ = 5 UpperCAmelCase__ = 4 # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() UpperCAmelCase__ = torch.device(args.device ) UpperCAmelCase__ = load_model_tokenizer(args.model_name_or_path , _lowerCAmelCase ) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" ) model.to(_lowerCAmelCase ) if args.max_length: UpperCAmelCase__ = args.max_length if args.num_beams: UpperCAmelCase__ = args.num_beams if args.output_file_path: UpperCAmelCase__ = args.output_file_path else: UpperCAmelCase__ = 'BART.onnx' logger.info("Exporting model to ONNX" ) export_and_validate_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) if __name__ == "__main__": main()
169
import math import qiskit def _a ( SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : int = 1 ) -> qiskit.result.counts.Counts: """simple docstring""" if ( isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ): raise TypeError('inputs must be integers.' ) if (input_a < 0) or (input_a < 0) or (carry_in < 0): raise ValueError('inputs must be positive.' ) if ( (math.floor(SCREAMING_SNAKE_CASE ) != input_a) or (math.floor(SCREAMING_SNAKE_CASE ) != input_a) or (math.floor(SCREAMING_SNAKE_CASE ) != carry_in) ): raise ValueError('inputs must be exact integers.' ) if (input_a > 2) or (input_a > 2) or (carry_in > 2): raise ValueError('inputs must be less or equal to 2.' ) # build registers __lowerCAmelCase: Union[str, Any] = qiskit.QuantumRegister(4 , 'qr' ) __lowerCAmelCase: List[Any] = qiskit.ClassicalRegister(2 , 'cr' ) # list the entries __lowerCAmelCase: Any = [input_a, input_a, carry_in] __lowerCAmelCase: List[str] = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for i in range(0 , 3 ): if entry[i] == 2: quantum_circuit.h(SCREAMING_SNAKE_CASE ) # for hadamard entries elif entry[i] == 1: quantum_circuit.x(SCREAMING_SNAKE_CASE ) # for 1 entries elif entry[i] == 0: quantum_circuit.i(SCREAMING_SNAKE_CASE ) # for 0 entries # build the circuit quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate quantum_circuit.cx(0 , 1 ) quantum_circuit.ccx(1 , 2 , 3 ) quantum_circuit.cx(1 , 2 ) quantum_circuit.cx(0 , 1 ) quantum_circuit.measure([2, 3] , SCREAMING_SNAKE_CASE ) # measure the last two qbits __lowerCAmelCase: List[str] = qiskit.Aer.get_backend('aer_simulator' ) __lowerCAmelCase: List[Any] = qiskit.execute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shots=10_00 ) return job.result().get_counts(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
322
0
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = inspect.getfile(accelerate.test_utils ) __snake_case : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] ) __snake_case : Optional[int] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] ) __snake_case : List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] ) @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' print(f"""Found {torch.cuda.device_count()} devices.""" ) __snake_case : Tuple = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(a__ , env=os.environ.copy() ) @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' print(f"""Found {torch.cuda.device_count()} devices.""" ) __snake_case : Tuple = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path] print(f"""Command: {cmd}""" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(a__ , env=os.environ.copy() ) @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(a__ , env=os.environ.copy() ) @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""" ) __snake_case : str = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ): execute_subprocess_async(a__ , env=os.environ.copy() ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Optional[int] = Accelerator() SCREAMING_SNAKE_CASE : Any = (accelerator.state.process_index + 2, 10) SCREAMING_SNAKE_CASE : Tuple = torch.randint(0, 10, shape).to(accelerator.device) SCREAMING_SNAKE_CASE : Any = "" SCREAMING_SNAKE_CASE : List[str] = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." SCREAMING_SNAKE_CASE : Any = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
353
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): __snake_case : Dict = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = '''sshleifer/tiny-gpt2''' __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = '''sgugger/tiny-distilbert-classification''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , only_pretrain_model=a_ , ) __snake_case : Optional[Any] = TensorFlowBenchmark(a_ ) __snake_case : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Any = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Union[str, Any] = AutoConfig.from_pretrained(a_ ) __snake_case : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = '''sshleifer/tiny-gpt2''' __snake_case : Optional[Any] = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Dict = TensorFlowBenchmark(a_ , [config] ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : int = TensorFlowBenchmark(a_ ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Dict = AutoConfig.from_pretrained(a_ ) __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''patrickvonplaten/t5-tiny-random''' __snake_case : Tuple = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , configs=[config] ) __snake_case : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , save_to_csv=a_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a_ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(a_ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(a_ , '''env.csv''' ) , multi_process=a_ , ) __snake_case : Union[str, Any] = TensorFlowBenchmark(a_ ) benchmark.run() self.assertTrue(Path(os.path.join(a_ , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''env.csv''' ) ).exists() ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(a_ ): self.assertTrue(hasattr(a_ , '''sequential''' ) ) self.assertTrue(hasattr(a_ , '''cumulative''' ) ) self.assertTrue(hasattr(a_ , '''current''' ) ) self.assertTrue(hasattr(a_ , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a_ , '''log.txt''' ) , log_print=a_ , trace_memory_line_by_line=a_ , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ ) __snake_case : Optional[int] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(a_ , '''log.txt''' ) ).exists() )
24
0
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = "▁" SCREAMING_SNAKE_CASE__ = {"vocab_file": "sentencepiece.bpe.model"} SCREAMING_SNAKE_CASE__ = { "vocab_file": { "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model", "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model", "xlm-roberta-large-finetuned-conll02-dutch": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll02-spanish": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll03-english": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll03-german": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model" ), } } SCREAMING_SNAKE_CASE__ = { "xlm-roberta-base": 512, "xlm-roberta-large": 512, "xlm-roberta-large-finetuned-conll02-dutch": 512, "xlm-roberta-large-finetuned-conll02-spanish": 512, "xlm-roberta-large-finetuned-conll03-english": 512, "xlm-roberta-large-finetuned-conll03-german": 512, } class lowercase ( __lowerCAmelCase ): _SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE = ['''input_ids''', '''attention_mask'''] def __init__( self , lowercase , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase = None , **lowercase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it lowerCAmelCase = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , ) lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCAmelCase_ ) ) lowerCAmelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token lowerCAmelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowerCAmelCase = 1 lowerCAmelCase = len(self.sp_model ) + self.fairseq_offset lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Union[str, Any]: lowerCAmelCase = self.__dict__.copy() lowerCAmelCase = None lowerCAmelCase = self.sp_model.serialized_model_proto() return state def __setstate__( self , lowercase ) -> List[str]: lowerCAmelCase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowerCAmelCase = {} lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _snake_case ( self , lowercase , lowercase = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] lowerCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self , lowercase , lowercase = None , lowercase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase_ )) + [1] return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1] def _snake_case ( self , lowercase , lowercase = None ) -> List[int]: lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _snake_case ( self ) -> int: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def _snake_case ( self ) -> Union[str, Any]: lowerCAmelCase = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _snake_case ( self , lowercase ) -> List[str]: return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ ) def _snake_case ( self , lowercase ) -> Dict: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowerCAmelCase = self.sp_model.PieceToId(lowerCAmelCase_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _snake_case ( self , lowercase ) -> Tuple: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _snake_case ( self , lowercase ) -> List[Any]: lowerCAmelCase = """""".join(lowerCAmelCase_ ).replace(lowerCAmelCase_ , """ """ ).strip() return out_string def _snake_case ( self , lowercase , lowercase = None ) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase_ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowerCAmelCase = os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase_ , """wb""" ) as fi: lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase_ ) return (out_vocab_file,)
46
import random from typing import Any def snake_case ( snake_case__ :list) -> list[Any]: for _ in range(len(snake_case__)): _A = random.randint(0 , len(snake_case__) - 1) _A = random.randint(0 , len(snake_case__) - 1) _A , _A = data[b], data[a] return data if __name__ == "__main__": _SCREAMING_SNAKE_CASE = [0, 1, 2, 3, 4, 5, 6, 7] _SCREAMING_SNAKE_CASE = ['python', 'says', 'hello', '!'] print('Fisher-Yates Shuffle:') print('List', integers, strings) print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
180
0
"""simple docstring""" import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def lowerCAmelCase_ ( _snake_case : Any=32 , _snake_case : str=10 , _snake_case : Optional[Any]=100 , _snake_case : Union[str, Any]=1026 , _snake_case : List[str]=True , _snake_case : Optional[int]="data/tokenized_stories_train_wikitext103.jbl" , _snake_case : List[Any]="igf_context_pairs.jbl" , ) -> str: '''simple docstring''' set_seed(3 ) # generate train_data and objective_set __magic_name__ : Dict = generate_datasets( _snake_case , _snake_case , number=_snake_case , min_len=1026 , trim=_snake_case ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? __magic_name__ : Dict = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) # load pretrained model __magic_name__ : str = load_gpta("gpt2" ).to(_snake_case ) print("computing perplexity on objective set" ) __magic_name__ : Union[str, Any] = compute_perplexity(_snake_case , _snake_case , _snake_case ).item() print("perplexity on objective set:" , _snake_case ) # collect igf pairs and save to file demo.jbl collect_objective_set(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def lowerCAmelCase_ ( _snake_case : int , _snake_case : str=15 , _snake_case : Optional[Any]=128 , _snake_case : Optional[Any]=100 , _snake_case : Any="igf_model.pt" , ) -> List[str]: '''simple docstring''' set_seed(42 ) # Load pre-trained model __magic_name__ : Any = GPTaLMHeadModel.from_pretrained("gpt2" ) # Initialize secondary learner to use embedding weights of model __magic_name__ : List[str] = SecondaryLearner(_snake_case ) # Train secondary learner __magic_name__ : Tuple = train_secondary_learner( _snake_case , _snake_case , max_epochs=_snake_case , batch_size=_snake_case , eval_freq=100 , igf_model_path=_snake_case , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def lowerCAmelCase_ ( _snake_case : str , _snake_case : Any , _snake_case : Optional[Any] , _snake_case : int=32 , _snake_case : int=1000 , _snake_case : Union[str, Any]=16 , _snake_case : List[str]=1.0 , _snake_case : int=recopy_gpta , _snake_case : Optional[Any]=None , _snake_case : List[Any]=10 , _snake_case : Tuple="gpt2_finetuned.pt" , ) -> List[Any]: '''simple docstring''' __magic_name__ : List[str] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) __magic_name__ : List[Any] = RandomSampler(_snake_case ) __magic_name__ : str = DataLoader(_snake_case , sampler=_snake_case ) __magic_name__ : Dict = max_steps // (len(_snake_case )) + 1 __magic_name__ : Union[str, Any] = 0 __magic_name__ : Optional[Any] = torch.zeros((1, context_len) , dtype=torch.long , device=_snake_case ) __magic_name__ : Union[str, Any] = recopy_model(_snake_case , _snake_case , _snake_case ) model.train() if secondary_learner is not None: secondary_learner.to(_snake_case ) secondary_learner.eval() __magic_name__ : str = [] __magic_name__ : str = 0 __magic_name__ : List[Any] = [] __magic_name__ : Tuple = [] # Compute the performance of the transformer model at the beginning __magic_name__ : Union[str, Any] = compute_perplexity(_snake_case , _snake_case , _snake_case ) test_perps.append(_snake_case ) print("Test perplexity, step" , _snake_case , ":" , _snake_case ) for epoch in range(int(_snake_case ) ): for step, example in enumerate(_snake_case ): torch.cuda.empty_cache() __magic_name__ : str = random.randint(0 , example.size(2 ) - context_len - 1 ) __magic_name__ : str = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() __magic_name__ : Union[str, Any] = model(_snake_case , labels=_snake_case ) __magic_name__ : Tuple = True if secondary_learner is not None: __magic_name__ : Union[str, Any] = secondary_learner.forward( torch.tensor(_snake_case , dtype=torch.long , device=_snake_case ).unsqueeze(0 ) )[0].item() observed_qs.append(float(_snake_case ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: __magic_name__ : Optional[int] = -1 if predicted_q < threshold: __magic_name__ : List[Any] = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) __magic_name__ : int = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() __magic_name__ : Dict = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: __magic_name__ : str = compute_perplexity(_snake_case , _snake_case , _snake_case ) test_perps.append(_snake_case ) print("Test perplexity, step" , _snake_case , ":" , _snake_case ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , _snake_case ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def lowerCAmelCase_ ( ) -> List[str]: '''simple docstring''' __magic_name__ : Any = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" ) # Required parameters parser.add_argument( "--data_dir" , default=_snake_case , type=_snake_case , required=_snake_case , help="The input data dir. Should contain data files for WikiText." , ) parser.add_argument( "--model_name_or_path" , default=_snake_case , type=_snake_case , required=_snake_case , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--data_file" , type=_snake_case , default=_snake_case , help=( "A jbl file containing tokenized data which can be split as objective dataset, " "train_dataset and test_dataset." ) , ) parser.add_argument( "--igf_data_file" , type=_snake_case , default=_snake_case , help="A jbl file containing the context and information gain pairs to train secondary learner." , ) parser.add_argument( "--output_dir" , default=_snake_case , type=_snake_case , required=_snake_case , help="The output directory where the final fine-tuned model is stored." , ) parser.add_argument( "--tokenizer_name" , default=_snake_case , type=_snake_case , help="Pretrained tokenizer name or path if not the same as model_name" , ) parser.add_argument("--seed" , type=_snake_case , default=_snake_case , help="A seed for reproducible training." ) parser.add_argument( "--context_len" , default=32 , type=_snake_case , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--size_objective_set" , default=100 , type=_snake_case , help="number of articles that are long enough to be used as our objective set" , ) parser.add_argument( "--eval_freq" , default=100 , type=_snake_case , help="secondary model evaluation is triggered at eval_freq" ) parser.add_argument("--max_steps" , default=1000 , type=_snake_case , help="To calculate training epochs" ) parser.add_argument( "--secondary_learner_batch_size" , default=128 , type=_snake_case , help="batch size of training data for secondary learner" , ) parser.add_argument( "--batch_size" , default=16 , type=_snake_case , help="batch size of training data of language model(gpt2) " ) parser.add_argument( "--eval_interval" , default=10 , type=_snake_case , help=( "decay the selectivity of our secondary learner filter from" "1 standard deviation above average to 1 below average after 10 batches" ) , ) parser.add_argument( "--number" , default=100 , type=_snake_case , help="The number of examples split to be used as objective_set/test_data" ) parser.add_argument( "--min_len" , default=1026 , type=_snake_case , help="The minimum length of the article to be used as objective set" ) parser.add_argument( "--secondary_learner_max_epochs" , default=15 , type=_snake_case , help="number of epochs to train secondary learner" ) parser.add_argument("--trim" , default=_snake_case , type=_snake_case , help="truncate the example if it exceeds context length" ) parser.add_argument( "--threshold" , default=1.0 , type=_snake_case , help=( "The threshold value used by secondary learner to filter the train_data and allow only" " informative data as input to the model" ) , ) parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=_snake_case , help="finetuned_model_name" ) parser.add_argument( "--recopy_model" , default=_snake_case , type=_snake_case , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=_snake_case , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ) # Load train data for secondary learner __magic_name__ : List[str] = joblib.load("data/IGF_values.jbl" ) # Train secondary learner __magic_name__ : List[str] = training_secondary_learner( _snake_case , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , ) # load pretrained gpt2 model __magic_name__ : int = GPTaLMHeadModel.from_pretrained("gpt2" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model __magic_name__ : List[str] = generate_datasets( context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1026 , trim=_snake_case ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( _snake_case , _snake_case , _snake_case , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=_snake_case , secondary_learner=_snake_case , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ) if __name__ == "__main__": main()
353
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _snake_case ( snake_case ): UpperCamelCase__ = ['image_processor', 'tokenizer'] UpperCamelCase__ = 'BridgeTowerImageProcessor' UpperCamelCase__ = ('RobertaTokenizer', 'RobertaTokenizerFast') def __init__( self , _a , _a ): super().__init__(_a , _a ) def __call__( self , _a , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ): __magic_name__ : Dict = self.tokenizer( text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , ) # add pixel_values + pixel_mask __magic_name__ : List[str] = self.image_processor( _a , return_tensors=_a , do_normalize=_a , do_center_crop=_a , **_a ) encoding.update(_a ) return encoding def SCREAMING_SNAKE_CASE ( self , *_a , **_a ): return self.tokenizer.batch_decode(*_a , **_a ) def SCREAMING_SNAKE_CASE ( self , *_a , **_a ): return self.tokenizer.decode(*_a , **_a ) @property def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Dict = self.tokenizer.model_input_names __magic_name__ : Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
41
0
from functools import lru_cache @lru_cache def __lowercase ( _SCREAMING_SNAKE_CASE ) -> int: '''simple docstring''' if num < 0: raise ValueError("""Number should not be negative.""" ) return 1 if num in (0, 1) else num * factorial(num - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
296
import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem SCREAMING_SNAKE_CASE_ = importlib.util.find_spec("""s3fs""") is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 SCREAMING_SNAKE_CASE_ = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''') fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def __lowercase ( _SCREAMING_SNAKE_CASE ) -> str: '''simple docstring''' if "://" in dataset_path: SCREAMING_SNAKE_CASE = dataset_path.split("""://""" )[1] return dataset_path def __lowercase ( _SCREAMING_SNAKE_CASE ) -> bool: '''simple docstring''' if fs is not None and fs.protocol != "file": return True else: return False def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = not is_remote_filesystem(_SCREAMING_SNAKE_CASE ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(_SCREAMING_SNAKE_CASE ) , fs._strip_protocol(_SCREAMING_SNAKE_CASE ) ) else: fs.mv(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , recursive=_SCREAMING_SNAKE_CASE ) def __lowercase ( ) -> None: '''simple docstring''' if hasattr(fsspec.asyn , """reset_lock""" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = threading.Lock()
296
1
import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def a__ ( __UpperCamelCase , __UpperCamelCase=0.999 , __UpperCamelCase="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(__UpperCamelCase ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__UpperCamelCase ): return math.exp(t * -12.0 ) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) SCREAMING_SNAKE_CASE_ = [] for i in range(__SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = i / num_diffusion_timesteps SCREAMING_SNAKE_CASE_ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__SCREAMING_SNAKE_CASE ) / alpha_bar_fn(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) ) return torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.floataa ) class lowerCamelCase (__UpperCamelCase , __UpperCamelCase ): """simple docstring""" lowerCamelCase__ = [e.name for e in KarrasDiffusionSchedulers] lowerCamelCase__ = 2 @register_to_config def __init__( self : Optional[int] , __magic_name__ : int = 1_000 , __magic_name__ : float = 0.0_0085 , __magic_name__ : float = 0.012 , __magic_name__ : str = "linear" , __magic_name__ : Optional[Union[np.ndarray, List[float]]] = None , __magic_name__ : str = "epsilon" , __magic_name__ : Optional[bool] = False , __magic_name__ : Optional[bool] = False , __magic_name__ : float = 1.0 , __magic_name__ : str = "linspace" , __magic_name__ : int = 0 , ) -> Union[str, Any]: if trained_betas is not None: SCREAMING_SNAKE_CASE_ = torch.tensor(_lowerCAmelCase , dtype=torch.floataa ) elif beta_schedule == "linear": SCREAMING_SNAKE_CASE_ = torch.linspace(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. SCREAMING_SNAKE_CASE_ = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCAmelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule SCREAMING_SNAKE_CASE_ = betas_for_alpha_bar(_lowerCAmelCase , alpha_transform_type="cosine" ) elif beta_schedule == "exp": SCREAMING_SNAKE_CASE_ = betas_for_alpha_bar(_lowerCAmelCase , alpha_transform_type="exp" ) else: raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' ) SCREAMING_SNAKE_CASE_ = 1.0 - self.betas SCREAMING_SNAKE_CASE_ = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = use_karras_sigmas def __A ( self : Tuple , __magic_name__ : int , __magic_name__ : int=None ) -> List[str]: if schedule_timesteps is None: SCREAMING_SNAKE_CASE_ = self.timesteps SCREAMING_SNAKE_CASE_ = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. for image-to-image) if len(self._index_counter ) == 0: SCREAMING_SNAKE_CASE_ = 1 if len(_lowerCAmelCase ) > 1 else 0 else: SCREAMING_SNAKE_CASE_ = timestep.cpu().item() if torch.is_tensor(_lowerCAmelCase ) else timestep SCREAMING_SNAKE_CASE_ = self._index_counter[timestep_int] return indices[pos].item() @property def __A ( self : Optional[Any] ) -> List[Any]: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def __A ( self : Optional[Any] , __magic_name__ : torch.FloatTensor , __magic_name__ : Union[float, torch.FloatTensor] , ) -> Dict: SCREAMING_SNAKE_CASE_ = self.index_for_timestep(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = self.sigmas[step_index] SCREAMING_SNAKE_CASE_ = sample / ((sigma**2 + 1) ** 0.5) return sample def __A ( self : List[Any] , __magic_name__ : int , __magic_name__ : Union[str, torch.device] = None , __magic_name__ : Optional[int] = None , ) -> Optional[int]: SCREAMING_SNAKE_CASE_ = num_inference_steps SCREAMING_SNAKE_CASE_ = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": SCREAMING_SNAKE_CASE_ = np.linspace(0 , num_train_timesteps - 1 , _lowerCAmelCase , dtype=_lowerCAmelCase )[::-1].copy() elif self.config.timestep_spacing == "leading": SCREAMING_SNAKE_CASE_ = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 SCREAMING_SNAKE_CASE_ = (np.arange(0 , _lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(_lowerCAmelCase ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": SCREAMING_SNAKE_CASE_ = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 SCREAMING_SNAKE_CASE_ = (np.arange(_lowerCAmelCase , 0 , -step_ratio )).round().copy().astype(_lowerCAmelCase ) timesteps -= 1 else: raise ValueError( F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' ) SCREAMING_SNAKE_CASE_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) SCREAMING_SNAKE_CASE_ = np.log(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = np.interp(_lowerCAmelCase , np.arange(0 , len(_lowerCAmelCase ) ) , _lowerCAmelCase ) if self.config.use_karras_sigmas: SCREAMING_SNAKE_CASE_ = self._convert_to_karras(in_sigmas=_lowerCAmelCase , num_inference_steps=self.num_inference_steps ) SCREAMING_SNAKE_CASE_ = np.array([self._sigma_to_t(_lowerCAmelCase , _lowerCAmelCase ) for sigma in sigmas] ) SCREAMING_SNAKE_CASE_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) SCREAMING_SNAKE_CASE_ = torch.from_numpy(_lowerCAmelCase ).to(device=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) SCREAMING_SNAKE_CASE_ = torch.from_numpy(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(_lowerCAmelCase ).startswith("mps" ): # mps does not support float64 SCREAMING_SNAKE_CASE_ = timesteps.to(_lowerCAmelCase , dtype=torch.floataa ) else: SCREAMING_SNAKE_CASE_ = timesteps.to(device=_lowerCAmelCase ) # empty dt and derivative SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter SCREAMING_SNAKE_CASE_ = defaultdict(_lowerCAmelCase ) def __A ( self : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] ) -> str: # get log sigma SCREAMING_SNAKE_CASE_ = np.log(_lowerCAmelCase ) # get distribution SCREAMING_SNAKE_CASE_ = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range SCREAMING_SNAKE_CASE_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) SCREAMING_SNAKE_CASE_ = low_idx + 1 SCREAMING_SNAKE_CASE_ = log_sigmas[low_idx] SCREAMING_SNAKE_CASE_ = log_sigmas[high_idx] # interpolate sigmas SCREAMING_SNAKE_CASE_ = (low - log_sigma) / (low - high) SCREAMING_SNAKE_CASE_ = np.clip(_lowerCAmelCase , 0 , 1 ) # transform interpolation to time range SCREAMING_SNAKE_CASE_ = (1 - w) * low_idx + w * high_idx SCREAMING_SNAKE_CASE_ = t.reshape(sigma.shape ) return t def __A ( self : Union[str, Any] , __magic_name__ : torch.FloatTensor , __magic_name__ : List[Any] ) -> Dict: SCREAMING_SNAKE_CASE_ = in_sigmas[-1].item() SCREAMING_SNAKE_CASE_ = in_sigmas[0].item() SCREAMING_SNAKE_CASE_ = 7.0 # 7.0 is the value used in the paper SCREAMING_SNAKE_CASE_ = np.linspace(0 , 1 , _lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = sigma_min ** (1 / rho) SCREAMING_SNAKE_CASE_ = sigma_max ** (1 / rho) SCREAMING_SNAKE_CASE_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def __A ( self : Optional[int] ) -> Any: return self.dt is None def __A ( self : Any , __magic_name__ : Union[torch.FloatTensor, np.ndarray] , __magic_name__ : Union[float, torch.FloatTensor] , __magic_name__ : Union[torch.FloatTensor, np.ndarray] , __magic_name__ : bool = True , ) -> str: SCREAMING_SNAKE_CASE_ = self.index_for_timestep(_lowerCAmelCase ) # advance index counter by 1 SCREAMING_SNAKE_CASE_ = timestep.cpu().item() if torch.is_tensor(_lowerCAmelCase ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: SCREAMING_SNAKE_CASE_ = self.sigmas[step_index] SCREAMING_SNAKE_CASE_ = self.sigmas[step_index + 1] else: # 2nd order / Heun's method SCREAMING_SNAKE_CASE_ = self.sigmas[step_index - 1] SCREAMING_SNAKE_CASE_ = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": SCREAMING_SNAKE_CASE_ = sigma_hat if self.state_in_first_order else sigma_next SCREAMING_SNAKE_CASE_ = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": SCREAMING_SNAKE_CASE_ = sigma_hat if self.state_in_first_order else sigma_next SCREAMING_SNAKE_CASE_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": SCREAMING_SNAKE_CASE_ = model_output else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' ) if self.config.clip_sample: SCREAMING_SNAKE_CASE_ = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order SCREAMING_SNAKE_CASE_ = (sample - pred_original_sample) / sigma_hat # 3. delta timestep SCREAMING_SNAKE_CASE_ = sigma_next - sigma_hat # store for 2nd order step SCREAMING_SNAKE_CASE_ = derivative SCREAMING_SNAKE_CASE_ = dt SCREAMING_SNAKE_CASE_ = sample else: # 2. 2nd order / Heun's method SCREAMING_SNAKE_CASE_ = (sample - pred_original_sample) / sigma_next SCREAMING_SNAKE_CASE_ = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample SCREAMING_SNAKE_CASE_ = self.dt SCREAMING_SNAKE_CASE_ = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=_lowerCAmelCase ) def __A ( self : Any , __magic_name__ : torch.FloatTensor , __magic_name__ : torch.FloatTensor , __magic_name__ : torch.FloatTensor , ) -> Optional[int]: # Make sure sigmas and timesteps have the same device and dtype as original_samples SCREAMING_SNAKE_CASE_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(_lowerCAmelCase ): # mps does not support float64 SCREAMING_SNAKE_CASE_ = self.timesteps.to(original_samples.device , dtype=torch.floataa ) SCREAMING_SNAKE_CASE_ = timesteps.to(original_samples.device , dtype=torch.floataa ) else: SCREAMING_SNAKE_CASE_ = self.timesteps.to(original_samples.device ) SCREAMING_SNAKE_CASE_ = timesteps.to(original_samples.device ) SCREAMING_SNAKE_CASE_ = [self.index_for_timestep(_lowerCAmelCase , _lowerCAmelCase ) for t in timesteps] SCREAMING_SNAKE_CASE_ = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): SCREAMING_SNAKE_CASE_ = sigma.unsqueeze(-1 ) SCREAMING_SNAKE_CASE_ = original_samples + noise * sigma return noisy_samples def __len__( self : Tuple ) -> Dict: return self.config.num_train_timesteps
370
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING A : str = logging.get_logger(__name__) A : Optional[int] = { "microsoft/table-transformer-detection": ( "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json" ), } class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = '''table-transformer''' lowerCamelCase__ = ['''past_key_values'''] lowerCamelCase__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self : List[Any] , __magic_name__ : Optional[Any]=True , __magic_name__ : Dict=None , __magic_name__ : Any=3 , __magic_name__ : List[str]=100 , __magic_name__ : Union[str, Any]=6 , __magic_name__ : Dict=2_048 , __magic_name__ : str=8 , __magic_name__ : int=6 , __magic_name__ : List[Any]=2_048 , __magic_name__ : Optional[int]=8 , __magic_name__ : Optional[int]=0.0 , __magic_name__ : List[Any]=0.0 , __magic_name__ : Optional[Any]=True , __magic_name__ : List[Any]="relu" , __magic_name__ : List[str]=256 , __magic_name__ : List[str]=0.1 , __magic_name__ : int=0.0 , __magic_name__ : Optional[Any]=0.0 , __magic_name__ : Tuple=0.02 , __magic_name__ : str=1.0 , __magic_name__ : int=False , __magic_name__ : Dict="sine" , __magic_name__ : Union[str, Any]="resnet50" , __magic_name__ : Optional[Any]=True , __magic_name__ : str=False , __magic_name__ : List[str]=1 , __magic_name__ : int=5 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Tuple=1 , __magic_name__ : Optional[int]=1 , __magic_name__ : Optional[Any]=5 , __magic_name__ : Optional[int]=2 , __magic_name__ : Union[str, Any]=0.1 , **__magic_name__ : Tuple , ) -> str: if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(__magic_name__ , __magic_name__ ): SCREAMING_SNAKE_CASE_ = backbone_config.get("model_type" ) SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE_ = config_class.from_dict(__magic_name__ ) # set timm attributes to None SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None, None, None SCREAMING_SNAKE_CASE_ = use_timm_backbone SCREAMING_SNAKE_CASE_ = backbone_config SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = num_queries SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = encoder_ffn_dim SCREAMING_SNAKE_CASE_ = encoder_layers SCREAMING_SNAKE_CASE_ = encoder_attention_heads SCREAMING_SNAKE_CASE_ = decoder_ffn_dim SCREAMING_SNAKE_CASE_ = decoder_layers SCREAMING_SNAKE_CASE_ = decoder_attention_heads SCREAMING_SNAKE_CASE_ = dropout SCREAMING_SNAKE_CASE_ = attention_dropout SCREAMING_SNAKE_CASE_ = activation_dropout SCREAMING_SNAKE_CASE_ = activation_function SCREAMING_SNAKE_CASE_ = init_std SCREAMING_SNAKE_CASE_ = init_xavier_std SCREAMING_SNAKE_CASE_ = encoder_layerdrop SCREAMING_SNAKE_CASE_ = decoder_layerdrop SCREAMING_SNAKE_CASE_ = encoder_layers SCREAMING_SNAKE_CASE_ = auxiliary_loss SCREAMING_SNAKE_CASE_ = position_embedding_type SCREAMING_SNAKE_CASE_ = backbone SCREAMING_SNAKE_CASE_ = use_pretrained_backbone SCREAMING_SNAKE_CASE_ = dilation # Hungarian matcher SCREAMING_SNAKE_CASE_ = class_cost SCREAMING_SNAKE_CASE_ = bbox_cost SCREAMING_SNAKE_CASE_ = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE_ = mask_loss_coefficient SCREAMING_SNAKE_CASE_ = dice_loss_coefficient SCREAMING_SNAKE_CASE_ = bbox_loss_coefficient SCREAMING_SNAKE_CASE_ = giou_loss_coefficient SCREAMING_SNAKE_CASE_ = eos_coefficient super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ ) @property def __A ( self : Union[str, Any] ) -> int: return self.encoder_attention_heads @property def __A ( self : Any ) -> int: return self.d_model class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = version.parse('''1.11''' ) @property def __A ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def __A ( self : Any ) -> float: return 1e-5 @property def __A ( self : int ) -> int: return 12
305
0
'''simple docstring''' import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class a__( unittest.TestCase ): lowercase__ = MODEL_FOR_MASKED_LM_MAPPING lowercase__ = TF_MODEL_FOR_MASKED_LM_MAPPING def lowercase_ ( self : Optional[Any] ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def lowercase_ ( self : Union[str, Any] ): a : Tuple = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' ) a : Optional[Any] = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(__snake_case , decimals=6 ) , [ {'sequence': 'My name is grouped', 'score': 2.1e-0_5, 'token': 3_80_15, 'token_str': ' grouped'}, {'sequence': 'My name is accuser', 'score': 2.1e-0_5, 'token': 2_55_06, 'token_str': ' accuser'}, ] , ) a : Dict = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(__snake_case , decimals=6 ) , [ { 'sequence': 'The largest city in France is grouped', 'score': 2.1e-0_5, 'token': 3_80_15, 'token_str': ' grouped', }, { 'sequence': 'The largest city in France is accuser', 'score': 2.1e-0_5, 'token': 2_55_06, 'token_str': ' accuser', }, ] , ) a : Union[str, Any] = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(__snake_case , decimals=6 ) , [ {'sequence': 'My name is Clara', 'score': 2e-0_5, 'token': 1_36_06, 'token_str': ' Clara'}, {'sequence': 'My name is Patrick', 'score': 2e-0_5, 'token': 34_99, 'token_str': ' Patrick'}, {'sequence': 'My name is Te', 'score': 1.9e-0_5, 'token': 29_41, 'token_str': ' Te'}, ] , ) @require_torch def lowercase_ ( self : List[Any] ): a : List[Any] = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' ) a : Optional[int] = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(__snake_case , decimals=6 ) , [ {'sequence': 'My name is Maul', 'score': 2.2e-0_5, 'token': 3_56_76, 'token_str': ' Maul'}, {'sequence': 'My name isELS', 'score': 2.2e-0_5, 'token': 1_64_16, 'token_str': 'ELS'}, ] , ) a : List[str] = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(__snake_case , decimals=6 ) , [ { 'sequence': 'The largest city in France is Maul', 'score': 2.2e-0_5, 'token': 3_56_76, 'token_str': ' Maul', }, {'sequence': 'The largest city in France isELS', 'score': 2.2e-0_5, 'token': 1_64_16, 'token_str': 'ELS'}, ] , ) a : str = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(__snake_case , decimals=6 ) , [ {'sequence': 'My name is Patrick', 'score': 2.1e-0_5, 'token': 34_99, 'token_str': ' Patrick'}, {'sequence': 'My name is Te', 'score': 2e-0_5, 'token': 29_41, 'token_str': ' Te'}, {'sequence': 'My name is Clara', 'score': 2e-0_5, 'token': 1_36_06, 'token_str': ' Clara'}, ] , ) a : List[str] = unmasker('My name is <mask> <mask>' , top_k=2 ) self.assertEqual( nested_simplify(__snake_case , decimals=6 ) , [ [ { 'score': 2.2e-0_5, 'token': 3_56_76, 'token_str': ' Maul', 'sequence': '<s>My name is Maul<mask></s>', }, {'score': 2.2e-0_5, 'token': 1_64_16, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'}, ], [ { 'score': 2.2e-0_5, 'token': 3_56_76, 'token_str': ' Maul', 'sequence': '<s>My name is<mask> Maul</s>', }, {'score': 2.2e-0_5, 'token': 1_64_16, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'}, ], ] , ) @require_torch_gpu def lowercase_ ( self : Any ): a : str = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' ) # convert model to fp16 pipe.model.half() a : Dict = pipe('Paris is the [MASK] of France.' ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(__snake_case , __snake_case ) @slow @require_torch def lowercase_ ( self : Any ): a : List[Any] = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' ) self.run_large_test(__snake_case ) @slow @require_tf def lowercase_ ( self : Any ): a : Union[str, Any] = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' ) self.run_large_test(__snake_case ) def lowercase_ ( self : Any , __snake_case : int ): a : List[str] = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(__snake_case ) , [ {'sequence': 'My name is John', 'score': 0.008, 'token': 6_10, 'token_str': ' John'}, {'sequence': 'My name is Chris', 'score': 0.007, 'token': 15_73, 'token_str': ' Chris'}, ] , ) a : List[Any] = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(__snake_case ) , [ { 'sequence': 'The largest city in France is Paris', 'score': 0.251, 'token': 22_01, 'token_str': ' Paris', }, { 'sequence': 'The largest city in France is Lyon', 'score': 0.214, 'token': 1_27_90, 'token_str': ' Lyon', }, ] , ) a : str = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(__snake_case ) , [ {'sequence': 'My name is Patrick', 'score': 0.005, 'token': 34_99, 'token_str': ' Patrick'}, {'sequence': 'My name is Clara', 'score': 0.000, 'token': 1_36_06, 'token_str': ' Clara'}, {'sequence': 'My name is Te', 'score': 0.000, 'token': 29_41, 'token_str': ' Te'}, ] , ) @require_torch def lowercase_ ( self : List[Any] ): a : str = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' ) a : int = None a : List[str] = None self.run_pipeline_test(__snake_case , [] ) @require_tf def lowercase_ ( self : Any ): a : str = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' ) a : List[Any] = None a : Optional[int] = None self.run_pipeline_test(__snake_case , [] ) def lowercase_ ( self : Any , __snake_case : Dict , __snake_case : Dict , __snake_case : Tuple ): if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' ) a : str = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case ) a : List[Any] = [ F"""This is another {tokenizer.mask_token} test""", ] return fill_masker, examples def lowercase_ ( self : Optional[Any] , __snake_case : Tuple , __snake_case : str ): a : Dict = fill_masker.tokenizer a : Any = fill_masker.model a : List[Any] = fill_masker( F"""This is a {tokenizer.mask_token}""" , ) self.assertEqual( __snake_case , [ {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, ] , ) a : Tuple = fill_masker([F"""This is a {tokenizer.mask_token}"""] ) self.assertEqual( __snake_case , [ {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, ] , ) a : Any = fill_masker([F"""This is a {tokenizer.mask_token}""", F"""Another {tokenizer.mask_token} great test."""] ) self.assertEqual( __snake_case , [ [ {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, ], [ {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, ], ] , ) with self.assertRaises(__snake_case ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(__snake_case ): fill_masker('This is' ) self.run_test_top_k(__snake_case , __snake_case ) self.run_test_targets(__snake_case , __snake_case ) self.run_test_top_k_targets(__snake_case , __snake_case ) self.fill_mask_with_duplicate_targets_and_top_k(__snake_case , __snake_case ) self.fill_mask_with_multiple_masks(__snake_case , __snake_case ) def lowercase_ ( self : str , __snake_case : str , __snake_case : Union[str, Any] ): a : List[str] = tokenizer.get_vocab() a : Optional[int] = sorted(vocab.keys() )[:2] # Pipeline argument a : Any = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case , targets=__snake_case ) a : List[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" ) self.assertEqual( __snake_case , [ {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, ] , ) a : List[Any] = {vocab[el] for el in targets} self.assertEqual({el['token'] for el in outputs} , __snake_case ) a : List[str] = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['token_str'] for el in outputs} , set(__snake_case ) ) # Call argument a : Tuple = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case ) a : str = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=__snake_case ) self.assertEqual( __snake_case , [ {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, ] , ) a : Tuple = {vocab[el] for el in targets} self.assertEqual({el['token'] for el in outputs} , __snake_case ) a : Optional[int] = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['token_str'] for el in outputs} , set(__snake_case ) ) # Score equivalence a : List[str] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=__snake_case ) a : List[str] = [top_mask['token_str'] for top_mask in outputs] a : str = [top_mask['score'] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(__snake_case ) == set(__snake_case ): a : Optional[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=__snake_case ) a : str = [top_mask['score'] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(__snake_case ) , nested_simplify(__snake_case ) ) # Raises with invalid with self.assertRaises(__snake_case ): a : int = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(__snake_case ): a : Any = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=[''] ) with self.assertRaises(__snake_case ): a : str = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets='' ) def lowercase_ ( self : Optional[int] , __snake_case : int , __snake_case : Optional[Any] ): a : str = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case , top_k=2 ) a : Union[str, Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" ) self.assertEqual( __snake_case , [ {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, ] , ) a : Tuple = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case ) a : int = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( __snake_case , [ {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, ] , ) self.assertEqual(nested_simplify(__snake_case ) , nested_simplify(__snake_case ) ) def lowercase_ ( self : List[Any] , __snake_case : Optional[int] , __snake_case : Any ): a : Any = tokenizer.get_vocab() a : Optional[int] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case ) # top_k=2, ntargets=3 a : str = sorted(vocab.keys() )[:3] a : Optional[int] = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=__snake_case ) # If we use the most probably targets, and filter differently, we should still # have the same results a : Optional[int] = [el['token_str'] for el in sorted(__snake_case , key=lambda __snake_case : x["score"] , reverse=__snake_case )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(__snake_case ).issubset(__snake_case ): a : Union[str, Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=__snake_case ) # They should yield exactly the same result self.assertEqual(nested_simplify(__snake_case ) , nested_simplify(__snake_case ) ) def lowercase_ ( self : Dict , __snake_case : List[Any] , __snake_case : Optional[int] ): a : Union[str, Any] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case ) a : int = tokenizer.get_vocab() # String duplicates + id duplicates a : str = sorted(vocab.keys() )[:3] a : int = [targets[0], targets[1], targets[0], targets[2], targets[1]] a : Union[str, Any] = fill_masker(F"""My name is {tokenizer.mask_token}""" , targets=__snake_case , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(__snake_case ) , 3 ) def lowercase_ ( self : Dict , __snake_case : Optional[Any] , __snake_case : Union[str, Any] ): a : Tuple = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case ) a : Optional[int] = fill_masker( F"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( __snake_case , [ [ {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, ], [ {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, ], [ {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, {'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )}, ], ] , )
297
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING _lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(lowerCAmelCase ) class _UpperCamelCase ( lowerCAmelCase ): def __init__( self :Optional[int] , **lowerCamelCase :Dict ) -> int: super().__init__(**lowerCamelCase ) requires_backends(self , "vision" ) requires_backends(self , "torch" ) if self.framework != "pt": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) self.check_model_type(lowerCamelCase ) def UpperCAmelCase_ ( self :Any , **lowerCamelCase :int ) -> int: UpperCAmelCase__ = {} UpperCAmelCase__ = {} UpperCAmelCase__ = {} # preprocess args if "points_per_batch" in kwargs: UpperCAmelCase__ = kwargs["points_per_batch"] if "points_per_crop" in kwargs: UpperCAmelCase__ = kwargs["points_per_crop"] if "crops_n_layers" in kwargs: UpperCAmelCase__ = kwargs["crops_n_layers"] if "crop_overlap_ratio" in kwargs: UpperCAmelCase__ = kwargs["crop_overlap_ratio"] if "crop_n_points_downscale_factor" in kwargs: UpperCAmelCase__ = kwargs["crop_n_points_downscale_factor"] # postprocess args if "pred_iou_thresh" in kwargs: UpperCAmelCase__ = kwargs["pred_iou_thresh"] if "stability_score_offset" in kwargs: UpperCAmelCase__ = kwargs["stability_score_offset"] if "mask_threshold" in kwargs: UpperCAmelCase__ = kwargs["mask_threshold"] if "stability_score_thresh" in kwargs: UpperCAmelCase__ = kwargs["stability_score_thresh"] if "crops_nms_thresh" in kwargs: UpperCAmelCase__ = kwargs["crops_nms_thresh"] if "output_rle_mask" in kwargs: UpperCAmelCase__ = kwargs["output_rle_mask"] if "output_bboxes_mask" in kwargs: UpperCAmelCase__ = kwargs["output_bboxes_mask"] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self :Union[str, Any] , lowerCamelCase :Union[str, Any] , *lowerCamelCase :str , lowerCamelCase :Optional[Any]=None , lowerCamelCase :int=None , **lowerCamelCase :Optional[Any] ) -> str: return super().__call__(lowerCamelCase , *lowerCamelCase , num_workers=lowerCamelCase , batch_size=lowerCamelCase , **lowerCamelCase ) def UpperCAmelCase_ ( self :Any , lowerCamelCase :str , lowerCamelCase :Optional[Any]=64 , lowerCamelCase :int = 0 , lowerCamelCase :float = 512 / 1500 , lowerCamelCase :Optional[int] = 32 , lowerCamelCase :Optional[int] = 1 , ) -> Any: UpperCAmelCase__ = load_image(lowerCamelCase ) UpperCAmelCase__ = self.image_processor.size["longest_edge"] UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.generate_crop_boxes( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) UpperCAmelCase__ = self.image_processor(images=lowerCamelCase , return_tensors="pt" ) with self.device_placement(): if self.framework == "pt": UpperCAmelCase__ = self.get_inference_context() with inference_context(): UpperCAmelCase__ = self._ensure_tensor_on_device(lowerCamelCase , device=self.device ) UpperCAmelCase__ = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) ) UpperCAmelCase__ = image_embeddings UpperCAmelCase__ = grid_points.shape[1] UpperCAmelCase__ = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. " "To return all points at once, set points_per_batch to None" ) for i in range(0 , lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = grid_points[:, i : i + points_per_batch, :, :] UpperCAmelCase__ = input_labels[:, i : i + points_per_batch] UpperCAmelCase__ = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def UpperCAmelCase_ ( self :Tuple , lowerCamelCase :List[str] , lowerCamelCase :Union[str, Any]=0.88 , lowerCamelCase :Optional[Any]=0.95 , lowerCamelCase :Tuple=0 , lowerCamelCase :Union[str, Any]=1 , ) -> Dict: UpperCAmelCase__ = model_inputs.pop("input_boxes" ) UpperCAmelCase__ = model_inputs.pop("is_last" ) UpperCAmelCase__ = model_inputs.pop("original_sizes" ).tolist() UpperCAmelCase__ = model_inputs.pop("reshaped_input_sizes" ).tolist() UpperCAmelCase__ = self.model(**lowerCamelCase ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks UpperCAmelCase__ = model_outputs["pred_masks"] UpperCAmelCase__ = self.image_processor.post_process_masks( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , binarize=lowerCamelCase ) UpperCAmelCase__ = model_outputs["iou_scores"] UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def UpperCAmelCase_ ( self :int , lowerCamelCase :str , lowerCamelCase :Union[str, Any]=False , lowerCamelCase :Union[str, Any]=False , lowerCamelCase :int=0.7 , ) -> Union[str, Any]: UpperCAmelCase__ = [] UpperCAmelCase__ = [] UpperCAmelCase__ = [] for model_output in model_outputs: all_scores.append(model_output.pop("iou_scores" ) ) all_masks.extend(model_output.pop("masks" ) ) all_boxes.append(model_output.pop("boxes" ) ) UpperCAmelCase__ = torch.cat(lowerCamelCase ) UpperCAmelCase__ = torch.cat(lowerCamelCase ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.post_process_for_mask_generation( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) UpperCAmelCase__ = defaultdict(lowerCamelCase ) for output in model_outputs: for k, v in output.items(): extra[k].append(lowerCamelCase ) UpperCAmelCase__ = {} if output_rle_mask: UpperCAmelCase__ = rle_mask if output_bboxes_mask: UpperCAmelCase__ = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
169
0
from __future__ import annotations import os from typing import Any import requests lowerCamelCase_ = '''https://api.github.com''' # https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user lowerCamelCase_ = BASE_URL + '''/user''' # https://github.com/settings/tokens lowerCamelCase_ = os.environ.get('''USER_TOKEN''', '''''') def __magic_name__ ( __a : str ): '''simple docstring''' UpperCamelCase__ = { """Authorization""": f"token {auth_token}", """Accept""": """application/vnd.github.v3+json""", } return requests.get(__a , headers=__a ).json() if __name__ == "__main__": # pragma: no cover if USER_TOKEN: for key, value in fetch_github_info(USER_TOKEN).items(): print(f'{key}: {value}') else: raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
178
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase_ = { '''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''], '''processing_git''': ['''GitProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GitForCausalLM''', '''GitModel''', '''GitPreTrainedModel''', '''GitVisionModel''', ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
178
1
"""simple docstring""" from __future__ import annotations import math import numpy as np from numpy.linalg import norm def _A (__a , __a ) -> float: """simple docstring""" return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(__a , __a ) ) ) def _A (__a , __a ) -> list[list[list[float] | float]]: """simple docstring""" if dataset.ndim != value_array.ndim: SCREAMING_SNAKE_CASE_ : Tuple = ( '''Wrong input data\'s dimensions... ''' f'dataset : {dataset.ndim}, value_array : {value_array.ndim}' ) raise ValueError(__a ) try: if dataset.shape[1] != value_array.shape[1]: SCREAMING_SNAKE_CASE_ : Any = ( '''Wrong input data\'s shape... ''' f'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}' ) raise ValueError(__a ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError('''Wrong shape''' ) if dataset.dtype != value_array.dtype: SCREAMING_SNAKE_CASE_ : List[str] = ( '''Input data have different datatype... ''' f'dataset : {dataset.dtype}, value_array : {value_array.dtype}' ) raise TypeError(__a ) SCREAMING_SNAKE_CASE_ : Any = [] for value in value_array: SCREAMING_SNAKE_CASE_ : int = euclidean(__a , dataset[0] ) SCREAMING_SNAKE_CASE_ : List[str] = dataset[0].tolist() for dataset_value in dataset[1:]: SCREAMING_SNAKE_CASE_ : Optional[int] = euclidean(__a , __a ) if dist > temp_dist: SCREAMING_SNAKE_CASE_ : Optional[int] = temp_dist SCREAMING_SNAKE_CASE_ : int = dataset_value.tolist() answer.append([vector, dist] ) return answer def _A (__a , __a ) -> float: """simple docstring""" return np.dot(__a , __a ) / (norm(__a ) * norm(__a )) if __name__ == "__main__": import doctest doctest.testmod()
91
import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__(self : Any , a__ : List[Any] , a__ : Dict=13 , a__ : str=32 , a__ : Tuple=3 , a__ : Optional[Any]=4 , a__ : Optional[int]=[10, 20, 30, 40] , a__ : List[Any]=[2, 2, 3, 2] , a__ : List[Any]=True , a__ : int=True , a__ : List[Any]=37 , a__ : Any="gelu" , a__ : int=10 , a__ : Dict=0.0_2 , a__ : Dict=["stage2", "stage3", "stage4"] , a__ : Tuple=[2, 3, 4] , a__ : List[str]=None , ): """simple docstring""" __snake_case = parent __snake_case = batch_size __snake_case = image_size __snake_case = num_channels __snake_case = num_stages __snake_case = hidden_sizes __snake_case = depths __snake_case = is_training __snake_case = use_labels __snake_case = intermediate_size __snake_case = hidden_act __snake_case = num_labels __snake_case = initializer_range __snake_case = out_features __snake_case = out_indices __snake_case = scope def a (self : Dict ): """simple docstring""" __snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.num_labels ) __snake_case = self.get_config() return config, pixel_values, labels def a (self : List[str] ): """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def a (self : str , a__ : Union[str, Any] , a__ : List[str] , a__ : List[Any] ): """simple docstring""" __snake_case = ConvNextModel(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a (self : Optional[Any] , a__ : List[Any] , a__ : str , a__ : List[Any] ): """simple docstring""" __snake_case = ConvNextForImageClassification(a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a (self : Tuple , a__ : List[Any] , a__ : List[str] , a__ : List[str] ): """simple docstring""" __snake_case = ConvNextBackbone(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __snake_case = None __snake_case = ConvNextBackbone(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def a (self : Tuple ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case = config_and_inputs __snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): A_ : Dict = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) A_ : Optional[Any] = ( {'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification} if is_torch_available() else {} ) A_ : Dict = True A_ : Optional[Any] = False A_ : int = False A_ : int = False A_ : List[str] = False def a (self : List[str] ): """simple docstring""" __snake_case = ConvNextModelTester(self ) __snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 ) def a (self : Tuple ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a (self : str ): """simple docstring""" return @unittest.skip(reason='''ConvNext does not use inputs_embeds''' ) def a (self : int ): """simple docstring""" pass @unittest.skip(reason='''ConvNext does not support input and output embeddings''' ) def a (self : Dict ): """simple docstring""" pass @unittest.skip(reason='''ConvNext does not use feedforward chunking''' ) def a (self : List[Any] ): """simple docstring""" pass def a (self : Optional[Any] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a__ ) __snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case = [*signature.parameters.keys()] __snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , a__ ) def a (self : List[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) def a (self : Dict ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*a__ ) def a (self : Dict ): """simple docstring""" def check_hidden_states_output(a__ : List[str] , a__ : str , a__ : Tuple ): __snake_case = model_class(a__ ) model.to(a__ ) model.eval() with torch.no_grad(): __snake_case = model(**self._prepare_for_class(a__ , a__ ) ) __snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __snake_case = self.model_tester.num_stages self.assertEqual(len(a__ ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = True check_hidden_states_output(a__ , a__ , a__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case = True check_hidden_states_output(a__ , a__ , a__ ) def a (self : Optional[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a__ ) @slow def a (self : Any ): """simple docstring""" for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case = ConvNextModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) def lowerCamelCase__ ( ) -> List[str]: __snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @cached_property def a (self : Tuple ): """simple docstring""" return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None @slow def a (self : Optional[Any] ): """simple docstring""" __snake_case = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(a__ ) __snake_case = self.default_image_processor __snake_case = prepare_img() __snake_case = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ ) # forward pass with torch.no_grad(): __snake_case = model(**a__ ) # verify the logits __snake_case = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , a__ ) __snake_case = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(a__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , _UpperCAmelCase ): A_ : Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else () A_ : List[Any] = ConvNextConfig A_ : Optional[Any] = False def a (self : Optional[int] ): """simple docstring""" __snake_case = ConvNextModelTester(self )
24
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = { '''configuration_bigbird_pegasus''': [ '''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BigBirdPegasusConfig''', '''BigBirdPegasusOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ '''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BigBirdPegasusForCausalLM''', '''BigBirdPegasusForConditionalGeneration''', '''BigBirdPegasusForQuestionAnswering''', '''BigBirdPegasusForSequenceClassification''', '''BigBirdPegasusModel''', '''BigBirdPegasusPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
353
import argparse import shutil from pathlib import Path from tqdm import tqdm from transformers import AutoTokenizer def __lowercase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Union[str, Any]=1024 ): UpperCamelCase_, UpperCamelCase_ : int = [], [] UpperCamelCase_ : Dict = list(zip(lowerCamelCase , lowerCamelCase ) ) UpperCamelCase_, UpperCamelCase_ : int = sorted_examples[0] def is_too_big(lowerCamelCase : str ): return tok(lowerCamelCase , return_tensors='pt' ).input_ids.shape[1] > max_tokens for src, tgt in tqdm(sorted_examples[1:] ): UpperCamelCase_ : Optional[Any] = new_src + ' ' + src UpperCamelCase_ : int = new_tgt + ' ' + tgt if is_too_big(lowerCamelCase ) or is_too_big(lowerCamelCase ): # cant fit, finalize example finished_src.append(lowerCamelCase ) finished_tgt.append(lowerCamelCase ) UpperCamelCase_, UpperCamelCase_ : Dict = src, tgt else: # can fit, keep adding UpperCamelCase_, UpperCamelCase_ : Union[str, Any] = cand_src, cand_tgt # cleanup if new_src: assert new_tgt finished_src.append(lowerCamelCase ) finished_tgt.append(lowerCamelCase ) return finished_src, finished_tgt def __lowercase ( lowerCamelCase : Dict , lowerCamelCase : Path , lowerCamelCase : Tuple , lowerCamelCase : Dict ): UpperCamelCase_ : List[Any] = Path(lowerCamelCase ) save_path.mkdir(exist_ok=lowerCamelCase ) for split in ["train"]: UpperCamelCase_, UpperCamelCase_ : Any = data_dir / F"{split}.source", data_dir / F"{split}.target" UpperCamelCase_ : List[Any] = [x.rstrip() for x in Path(lowerCamelCase ).open().readlines()] UpperCamelCase_ : Optional[int] = [x.rstrip() for x in Path(lowerCamelCase ).open().readlines()] UpperCamelCase_, UpperCamelCase_ : Union[str, Any] = pack_examples(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) print(F"packed {split} split from {len(lowerCamelCase )} examples -> {len(lowerCamelCase )}." ) Path(save_path / F"{split}.source" ).open('w' ).write('\n'.join(lowerCamelCase ) ) Path(save_path / F"{split}.target" ).open('w' ).write('\n'.join(lowerCamelCase ) ) for split in ["val", "test"]: UpperCamelCase_, UpperCamelCase_ : Any = data_dir / F"{split}.source", data_dir / F"{split}.target" shutil.copyfile(lowerCamelCase , save_path / F"{split}.source" ) shutil.copyfile(lowerCamelCase , save_path / F"{split}.target" ) def __lowercase ( ): UpperCamelCase_ : int = argparse.ArgumentParser() parser.add_argument('--tok_name' , type=lowerCamelCase , help='like facebook/bart-large-cnn,t5-base, etc.' ) parser.add_argument('--max_seq_len' , type=lowerCamelCase , default=128 ) parser.add_argument('--data_dir' , type=lowerCamelCase ) parser.add_argument('--save_path' , type=lowerCamelCase ) UpperCamelCase_ : Tuple = parser.parse_args() UpperCamelCase_ : Optional[int] = AutoTokenizer.from_pretrained(args.tok_name ) return pack_data_dir(lowerCamelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path ) if __name__ == "__main__": packer_cli()
50
0
from functools import lru_cache def _snake_case ( lowerCAmelCase : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = 2 SCREAMING_SNAKE_CASE_ : str = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(lowerCAmelCase ) if n > 1: factors.add(lowerCAmelCase ) return factors @lru_cache def _snake_case ( lowerCAmelCase : Optional[Any] ): """simple docstring""" return len(unique_prime_factors(lowerCAmelCase ) ) def _snake_case ( lowerCAmelCase : Union[str, Any] ): """simple docstring""" return len(set(lowerCAmelCase ) ) in (0, 1) def _snake_case ( lowerCAmelCase : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = 2 while True: # Increment each value of a generated range SCREAMING_SNAKE_CASE_ : List[Any] = [base + i for i in range(lowerCAmelCase )] # Run elements through out unique_prime_factors function # Append our target number to the end. SCREAMING_SNAKE_CASE_ : Dict = [upf_len(lowerCAmelCase ) for x in group] checker.append(lowerCAmelCase ) # If all numbers in the list are equal, return the group variable. if equality(lowerCAmelCase ): return group # Increment our base variable by 1 base += 1 def _snake_case ( lowerCAmelCase : str = 4 ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = run(lowerCAmelCase ) return results[0] if len(lowerCAmelCase ) else None if __name__ == "__main__": print(solution())
18
'''simple docstring''' def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> int: return abs(UpperCamelCase ) if a == 0 else greatest_common_divisor(b % a , UpperCamelCase ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> int: while y: # --> when y=0 then loop will terminate and return x as final GCD. lowerCamelCase__ , lowerCamelCase__ : Tuple = y, x % y return abs(UpperCamelCase ) def SCREAMING_SNAKE_CASE_ () -> Tuple: try: lowerCamelCase__ : Dict = input("""Enter two integers separated by comma (,): """ ).split(""",""" ) lowerCamelCase__ : Any = int(nums[0] ) lowerCamelCase__ : Optional[Any] = int(nums[1] ) print( f'''greatest_common_divisor({num_a}, {num_a}) = ''' f'''{greatest_common_divisor(UpperCamelCase , UpperCamelCase )}''' ) print(f'''By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(UpperCamelCase , UpperCamelCase )}''' ) except (IndexError, UnboundLocalError, ValueError): print("""Wrong input""" ) if __name__ == "__main__": main()
41
0
"""simple docstring""" def __A (_SCREAMING_SNAKE_CASE = 1000 ) ->int: """simple docstring""" lowerCAmelCase__ :Union[str, Any] = -1 lowerCAmelCase__ :Optional[Any] = 0 for a in range(1 , n // 3 ): # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c lowerCAmelCase__ :Tuple = (n * n - 2 * a * n) // (2 * n - 2 * a) lowerCAmelCase__ :Union[str, Any] = n - a - b if c * c == (a * a + b * b): lowerCAmelCase__ :List[str] = a * b * c if candidate >= product: lowerCAmelCase__ :List[Any] = candidate return product if __name__ == "__main__": print(F'''{solution() = }''')
254
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule __A = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
254
1
def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[Any] = generate_pascal_triangle(_UpperCAmelCase ) for row_idx in range(_UpperCAmelCase ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=" " ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=" " ) else: print(triangle[row_idx][col_idx] , end="" ) print() def A_ ( _UpperCAmelCase ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise TypeError("The input value of 'num_rows' should be 'int'" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( "The input value of 'num_rows' should be greater than or equal to 0" ) SCREAMING_SNAKE_CASE_: list[list[int]] = [] for current_row_idx in range(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[Any] = populate_current_row(_UpperCAmelCase , _UpperCAmelCase ) triangle.append(_UpperCAmelCase ) return triangle def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Dict = [-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = 1, 1 for current_col_idx in range(1 , _UpperCAmelCase ): calculate_current_element( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return current_row def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ): SCREAMING_SNAKE_CASE_: str = triangle[current_row_idx - 1][current_col_idx - 1] SCREAMING_SNAKE_CASE_: Optional[int] = triangle[current_row_idx - 1][current_col_idx] SCREAMING_SNAKE_CASE_: str = above_to_left_elt + above_to_right_elt def A_ ( _UpperCAmelCase ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise TypeError("The input value of 'num_rows' should be 'int'" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( "The input value of 'num_rows' should be greater than or equal to 0" ) SCREAMING_SNAKE_CASE_: list[list[int]] = [[1]] for row_index in range(1 , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Any = [0] + result[-1] + [0] SCREAMING_SNAKE_CASE_: Tuple = row_index + 1 # Calculate the number of distinct elements in a row SCREAMING_SNAKE_CASE_: Any = sum(divmod(_UpperCAmelCase , 2 ) ) SCREAMING_SNAKE_CASE_: Optional[int] = [ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] SCREAMING_SNAKE_CASE_: Tuple = row_first_half[: (row_index + 1) // 2] row_second_half.reverse() SCREAMING_SNAKE_CASE_: List[str] = row_first_half + row_second_half result.append(_UpperCAmelCase ) return result def A_ ( ): from collections.abc import Callable from timeit import timeit def benchmark_a_function(_UpperCAmelCase , _UpperCAmelCase ) -> None: SCREAMING_SNAKE_CASE_: int = f"{func.__name__}({value})" SCREAMING_SNAKE_CASE_: List[str] = timeit(f"__main__.{call}" , setup="import __main__" ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(f"{call:38} -- {timing:.4f} seconds" ) for value in range(15 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(_UpperCAmelCase , _UpperCAmelCase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
13
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from accelerate import PartialState from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce def UpperCamelCase ( __magic_name__ : Any ) -> Optional[int]: """simple docstring""" return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device ) def UpperCamelCase ( __magic_name__ : int ) -> Union[str, Any]: """simple docstring""" lowercase__ = create_tensor(__magic_name__ ) lowercase__ = gather(__magic_name__ ) assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) ) def UpperCamelCase ( __magic_name__ : Optional[int] ) -> Tuple: """simple docstring""" lowercase__ = [state.process_index] lowercase__ = gather_object(__magic_name__ ) assert len(__magic_name__ ) == state.num_processes, f'''{gathered_obj}, {len(__magic_name__ )} != {state.num_processes}''' assert gathered_obj == list(range(state.num_processes ) ), f'''{gathered_obj} != {list(range(state.num_processes ) )}''' def UpperCamelCase ( __magic_name__ : str ) -> Dict: """simple docstring""" lowercase__ = create_tensor(__magic_name__ ) lowercase__ = broadcast(__magic_name__ ) assert broadcasted_tensor.shape == torch.Size([state.num_processes] ) assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) ) def UpperCamelCase ( __magic_name__ : str ) -> Dict: """simple docstring""" if state.is_main_process: lowercase__ = torch.arange(state.num_processes + 1 ).to(state.device ) else: lowercase__ = torch.arange(state.num_processes ).to(state.device ) lowercase__ = pad_across_processes(__magic_name__ ) assert padded_tensor.shape == torch.Size([state.num_processes + 1] ) if not state.is_main_process: assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0] def UpperCamelCase ( __magic_name__ : List[Any] ) -> Optional[int]: """simple docstring""" if state.num_processes != 2: return lowercase__ = create_tensor(__magic_name__ ) lowercase__ = reduce(__magic_name__ , """sum""" ) lowercase__ = torch.tensor([4.0, 6] ).to(state.device ) assert torch.allclose(__magic_name__ , __magic_name__ ), f'''{reduced_tensor} != {truth_tensor}''' def UpperCamelCase ( __magic_name__ : Dict ) -> int: """simple docstring""" if state.num_processes != 2: return lowercase__ = create_tensor(__magic_name__ ) lowercase__ = reduce(__magic_name__ , """mean""" ) lowercase__ = torch.tensor([2.0, 3] ).to(state.device ) assert torch.allclose(__magic_name__ , __magic_name__ ), f'''{reduced_tensor} != {truth_tensor}''' def UpperCamelCase ( __magic_name__ : str ) -> int: """simple docstring""" main() def UpperCamelCase ( ) -> Optional[int]: """simple docstring""" lowercase__ = PartialState() state.print(f'''State: {state}''' ) state.print("""testing gather""" ) test_gather(__magic_name__ ) state.print("""testing gather_object""" ) test_gather_object(__magic_name__ ) state.print("""testing broadcast""" ) test_broadcast(__magic_name__ ) state.print("""testing pad_across_processes""" ) test_pad_across_processes(__magic_name__ ) state.print("""testing reduce_sum""" ) test_reduce_sum(__magic_name__ ) state.print("""testing reduce_mean""" ) test_reduce_mean(__magic_name__ ) if __name__ == "__main__": main()
305
0
'''simple docstring''' import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class a_ ( lowerCamelCase ): def A__ ( self ) -> int: """simple docstring""" UpperCamelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """tf_padding""" ) ) self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """depth_multiplier""" ) ) class a_ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=0.2_5 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu6" , _SCREAMING_SNAKE_CASE=1280 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=None , ) -> Any: """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = num_channels UpperCamelCase = image_size UpperCamelCase = depth_multiplier UpperCamelCase = depth_divisible_by UpperCamelCase = min_depth UpperCamelCase = expand_ratio UpperCamelCase = tf_padding UpperCamelCase = output_stride UpperCamelCase = first_layer_is_expansion UpperCamelCase = finegrained_output UpperCamelCase = hidden_act UpperCamelCase = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier ) UpperCamelCase = classifier_dropout_prob UpperCamelCase = use_labels UpperCamelCase = is_training UpperCamelCase = num_labels UpperCamelCase = initializer_range UpperCamelCase = scope def A__ ( self ) -> List[str]: """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCamelCase = self.get_config() return config, pixel_values, labels, pixel_labels def A__ ( self ) -> Optional[int]: """simple docstring""" return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" UpperCamelCase = MobileNetVaModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) self.parent.assertEqual( result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = MobileNetVaForImageClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = MobileNetVaForSemanticSegmentation(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) UpperCamelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def A__ ( self ) -> List[str]: """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class a_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase = ( (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation) if is_torch_available() else () ) lowercase = ( { """feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification, """image-segmentation""": MobileNetVaForSemanticSegmentation, } if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def A__ ( self ) -> List[str]: """simple docstring""" UpperCamelCase = MobileNetVaModelTester(self ) UpperCamelCase = MobileNetVaConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" ) def A__ ( self ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" ) def A__ ( self ) -> Tuple: """simple docstring""" pass @unittest.skip(reason="""MobileNetV2 does not output attentions""" ) def A__ ( self ) -> List[Any]: """simple docstring""" pass def A__ ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE ) UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) def A__ ( self ) -> str: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[Any]: """simple docstring""" def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) UpperCamelCase = outputs.hidden_states UpperCamelCase = 16 self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = True check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> str: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_SCREAMING_SNAKE_CASE ) @slow def A__ ( self ) -> Tuple: """simple docstring""" for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = MobileNetVaModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def lowercase__ ( )-> Any: UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class a_ ( unittest.TestCase ): @cached_property def A__ ( self ) -> Dict: """simple docstring""" return ( MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None ) @slow def A__ ( self ) -> int: """simple docstring""" UpperCamelCase = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): UpperCamelCase = model(**_SCREAMING_SNAKE_CASE ) # verify the logits UpperCamelCase = torch.Size((1, 1001) ) self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE ) UpperCamelCase = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5] ).to(_SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow def A__ ( self ) -> Dict: """simple docstring""" UpperCamelCase = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" ) UpperCamelCase = model.to(_SCREAMING_SNAKE_CASE ) UpperCamelCase = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" ) UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): UpperCamelCase = model(**_SCREAMING_SNAKE_CASE ) UpperCamelCase = outputs.logits # verify the logits UpperCamelCase = torch.Size((1, 21, 65, 65) ) self.assertEqual(logits.shape , _SCREAMING_SNAKE_CASE ) UpperCamelCase = torch.tensor( [ [[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]], [[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, -2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]], [[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]], ] , device=_SCREAMING_SNAKE_CASE , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
183
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger('transformers.models.speecht5') def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[str]: hf_model.apply_weight_norm() UpperCamelCase = checkpoint["""input_conv.weight_g"""] UpperCamelCase = checkpoint["""input_conv.weight_v"""] UpperCamelCase = checkpoint["""input_conv.bias"""] for i in range(len(config.upsample_rates ) ): UpperCamelCase = checkpoint[F"upsamples.{i}.1.weight_g"] UpperCamelCase = checkpoint[F"upsamples.{i}.1.weight_v"] UpperCamelCase = checkpoint[F"upsamples.{i}.1.bias"] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): UpperCamelCase = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"] UpperCamelCase = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"] UpperCamelCase = checkpoint[F"blocks.{i}.convs1.{j}.1.bias"] UpperCamelCase = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"] UpperCamelCase = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"] UpperCamelCase = checkpoint[F"blocks.{i}.convs2.{j}.1.bias"] UpperCamelCase = checkpoint["""output_conv.1.weight_g"""] UpperCamelCase = checkpoint["""output_conv.1.weight_v"""] UpperCamelCase = checkpoint["""output_conv.1.bias"""] hf_model.remove_weight_norm() @torch.no_grad() def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , )-> List[Any]: if config_path is not None: UpperCamelCase = SpeechTaHifiGanConfig.from_pretrained(__UpperCamelCase ) else: UpperCamelCase = SpeechTaHifiGanConfig() UpperCamelCase = SpeechTaHifiGan(__UpperCamelCase ) UpperCamelCase = torch.load(__UpperCamelCase ) load_weights(orig_checkpoint["""model"""]["""generator"""] , __UpperCamelCase , __UpperCamelCase ) UpperCamelCase = np.load(__UpperCamelCase ) UpperCamelCase = stats[0].reshape(-1 ) UpperCamelCase = stats[1].reshape(-1 ) UpperCamelCase = torch.from_numpy(__UpperCamelCase ).float() UpperCamelCase = torch.from_numpy(__UpperCamelCase ).float() model.save_pretrained(__UpperCamelCase ) if repo_id: print("""Pushing to the hub...""" ) model.push_to_hub(__UpperCamelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint') parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.' ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
183
1
from scipy.stats import pearsonr import datasets lowercase = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n" lowercase = "\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results['pearsonr'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n ['p-value', 'pearsonr']\n >>> print(round(results['pearsonr'], 2))\n -0.74\n >>> print(round(results['p-value'], 2))\n 0.15\n" lowercase = "\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase_ ( datasets.Metric ): '''simple docstring''' def _UpperCamelCase ( self ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('float' ), 'references': datasets.Value('float' ), } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] , ) def _UpperCamelCase ( self , a , a , a=False ) -> Union[str, Any]: if return_pvalue: snake_case_ = pearsonr(a , a ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(a , a )[0] )}
178
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowercase = logging.get_logger(__name__) def __UpperCAmelCase ( a_ , a_=False): snake_case_ = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith('head'): snake_case_ = 'segformer.encoder.' + key if key.startswith('backbone'): snake_case_ = key.replace('backbone' , 'segformer.encoder') if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 snake_case_ = key[key.find('patch_embed') + len('patch_embed')] snake_case_ = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(a_)-1}''') if "norm" in key: snake_case_ = key.replace('norm' , 'layer_norm') if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 snake_case_ = key[key.find('segformer.encoder.layer_norm') + len('segformer.encoder.layer_norm')] snake_case_ = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(a_)-1}''') if "layer_norm1" in key: snake_case_ = key.replace('layer_norm1' , 'layer_norm_1') if "layer_norm2" in key: snake_case_ = key.replace('layer_norm2' , 'layer_norm_2') if "block" in key: # replace for example block1 by block.0 snake_case_ = key[key.find('block') + len('block')] snake_case_ = key.replace(f'''block{idx}''' , f'''block.{int(a_)-1}''') if "attn.q" in key: snake_case_ = key.replace('attn.q' , 'attention.self.query') if "attn.proj" in key: snake_case_ = key.replace('attn.proj' , 'attention.output.dense') if "attn" in key: snake_case_ = key.replace('attn' , 'attention.self') if "fc1" in key: snake_case_ = key.replace('fc1' , 'dense1') if "fc2" in key: snake_case_ = key.replace('fc2' , 'dense2') if "linear_pred" in key: snake_case_ = key.replace('linear_pred' , 'classifier') if "linear_fuse" in key: snake_case_ = key.replace('linear_fuse.conv' , 'linear_fuse') snake_case_ = key.replace('linear_fuse.bn' , 'batch_norm') if "linear_c" in key: # replace for example linear_c4 by linear_c.3 snake_case_ = key[key.find('linear_c') + len('linear_c')] snake_case_ = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(a_)-1}''') if key.startswith('head'): snake_case_ = key.replace('head' , 'classifier') snake_case_ = value return new_state_dict def __UpperCAmelCase ( a_ , a_): # for each of the encoder blocks: for i in range(config.num_encoder_blocks): for j in range(config.depths[i]): # read in weights + bias of keys and values (which is a single matrix in the original implementation) snake_case_ = state_dict.pop(f'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''') snake_case_ = state_dict.pop(f'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''') # next, add keys and values (in that order) to the state dict snake_case_ = kv_weight[ : config.hidden_sizes[i], : ] snake_case_ = kv_bias[: config.hidden_sizes[i]] snake_case_ = kv_weight[ config.hidden_sizes[i] :, : ] snake_case_ = kv_bias[ config.hidden_sizes[i] : ] def __UpperCAmelCase ( ): snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case_ = Image.open(requests.get(a_ , stream=a_).raw) return image @torch.no_grad() def __UpperCAmelCase ( a_ , a_ , a_): snake_case_ = SegformerConfig() snake_case_ = False # set attributes based on model_name snake_case_ = 'huggingface/label-files' if "segformer" in model_name: snake_case_ = model_name[len('segformer.') : len('segformer.') + 2] if "ade" in model_name: snake_case_ = 1_50 snake_case_ = 'ade20k-id2label.json' snake_case_ = (1, 1_50, 1_28, 1_28) elif "city" in model_name: snake_case_ = 19 snake_case_ = 'cityscapes-id2label.json' snake_case_ = (1, 19, 1_28, 1_28) else: raise ValueError(f'''Model {model_name} not supported''') elif "mit" in model_name: snake_case_ = True snake_case_ = model_name[4:6] snake_case_ = 10_00 snake_case_ = 'imagenet-1k-id2label.json' snake_case_ = (1, 10_00) else: raise ValueError(f'''Model {model_name} not supported''') # set config attributes snake_case_ = json.load(open(hf_hub_download(a_ , a_ , repo_type='dataset') , 'r')) snake_case_ = {int(a_): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": snake_case_ = [64, 1_28, 3_20, 5_12] snake_case_ = 2_56 elif size == "b2": snake_case_ = [64, 1_28, 3_20, 5_12] snake_case_ = 7_68 snake_case_ = [3, 4, 6, 3] elif size == "b3": snake_case_ = [64, 1_28, 3_20, 5_12] snake_case_ = 7_68 snake_case_ = [3, 4, 18, 3] elif size == "b4": snake_case_ = [64, 1_28, 3_20, 5_12] snake_case_ = 7_68 snake_case_ = [3, 8, 27, 3] elif size == "b5": snake_case_ = [64, 1_28, 3_20, 5_12] snake_case_ = 7_68 snake_case_ = [3, 6, 40, 3] else: raise ValueError(f'''Size {size} not supported''') # load image processor (only resize + normalize) snake_case_ = SegformerImageProcessor( image_scale=(5_12, 5_12) , keep_ratio=a_ , align=a_ , do_random_crop=a_) # prepare image snake_case_ = prepare_img() snake_case_ = image_processor(images=a_ , return_tensors='pt').pixel_values logger.info(f'''Converting model {model_name}...''') # load original state dict if encoder_only: snake_case_ = torch.load(a_ , map_location=torch.device('cpu')) else: snake_case_ = torch.load(a_ , map_location=torch.device('cpu'))['state_dict'] # rename keys snake_case_ = rename_keys(a_ , encoder_only=a_) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(a_ , a_) # create HuggingFace model and load state dict if encoder_only: snake_case_ = False snake_case_ = SegformerForImageClassification(a_) else: snake_case_ = SegformerForSemanticSegmentation(a_) model.load_state_dict(a_) model.eval() # forward pass snake_case_ = model(a_) snake_case_ = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": snake_case_ = torch.tensor( [ [[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]], [[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]], [[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]], ]) elif model_name == "segformer.b1.512x512.ade.160k": snake_case_ = torch.tensor( [ [[-7.58_20, -8.72_31, -8.32_15], [-8.06_00, -10.35_29, -10.03_04], [-7.52_08, -9.41_03, -9.62_39]], [[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]], [[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]], ]) elif model_name == "segformer.b2.512x512.ade.160k": snake_case_ = torch.tensor( [ [[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]], [[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]], [[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]], ]) elif model_name == "segformer.b3.512x512.ade.160k": snake_case_ = torch.tensor( [ [[-9.08_78, -10.20_81, -10.18_91], [-9.31_44, -10.79_41, -10.98_43], [-9.22_94, -10.38_55, -10.57_04]], [[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]], [[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]], ]) elif model_name == "segformer.b4.512x512.ade.160k": snake_case_ = torch.tensor( [ [[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]], [[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]], [[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]], ]) elif model_name == "segformer.b5.640x640.ade.160k": snake_case_ = torch.tensor( [ [[-9.55_24, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.58_42, -12.88_51, -13.94_14]], [[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]], [[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]], ]) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": snake_case_ = torch.tensor( [ [[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]], [[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]], [[0.53_74, 0.10_67, -0.47_42], [0.11_41, -0.22_55, -0.70_99], [-0.30_00, -0.59_24, -1.31_05]], ]) elif model_name == "segformer.b0.512x1024.city.160k": snake_case_ = torch.tensor( [ [[-7.82_17, -9.87_67, -10.17_17], [-9.44_38, -10.90_58, -11.40_47], [-9.79_39, -12.34_95, -12.10_79]], [[-7.15_14, -9.53_36, -10.08_60], [-9.77_76, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]], [[0.30_21, 0.08_05, -0.23_10], [-0.03_28, -0.16_05, -0.27_14], [-0.14_08, -0.54_77, -0.69_76]], ]) elif model_name == "segformer.b0.640x1280.city.160k": snake_case_ = torch.tensor( [ [ [-1.1_372E01, -1.2_787E01, -1.3_477E01], [-1.2_536E01, -1.4_194E01, -1.4_409E01], [-1.3_217E01, -1.4_888E01, -1.5_327E01], ], [ [-1.4_791E01, -1.7_122E01, -1.8_277E01], [-1.7_163E01, -1.9_192E01, -1.9_533E01], [-1.7_897E01, -1.9_991E01, -2.0_315E01], ], [ [7.6_723E-01, 4.1_921E-01, -7.7_878E-02], [4.7_772E-01, 9.5_557E-03, -2.8_082E-01], [3.6_032E-01, -2.4_826E-01, -5.1_168E-01], ], ]) elif model_name == "segformer.b0.768x768.city.160k": snake_case_ = torch.tensor( [ [[-9.49_59, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]], [[-9.89_05, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]], [[0.22_13, 0.01_92, -0.24_66], [-0.17_31, -0.42_13, -0.48_74], [-0.31_26, -0.65_41, -1.13_89]], ]) elif model_name == "segformer.b1.1024x1024.city.160k": snake_case_ = torch.tensor( [ [[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]], [[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]], [[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]], ]) elif model_name == "segformer.b2.1024x1024.city.160k": snake_case_ = torch.tensor( [ [[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]], [[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]], [[-4.51_78, -5.50_37, -6.51_09], [-5.08_84, -7.21_74, -8.03_34], [-4.41_56, -5.81_17, -7.29_70]], ]) elif model_name == "segformer.b3.1024x1024.city.160k": snake_case_ = torch.tensor( [ [[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]], [[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]], [[-4.73_49, -4.95_88, -5.09_66], [-4.32_10, -6.93_25, -7.25_91], [-3.43_12, -4.74_84, -7.19_17]], ]) elif model_name == "segformer.b4.1024x1024.city.160k": snake_case_ = torch.tensor( [ [[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]], [[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]], [[1.04_91, 0.82_89, 1.03_10], [1.10_44, 0.52_19, 0.80_55], [1.08_99, 0.69_26, 0.55_90]], ]) elif model_name == "segformer.b5.1024x1024.city.160k": snake_case_ = torch.tensor( [ [[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]], [[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]], [[-1.79_90, -2.09_51, -1.77_84], [-2.63_97, -3.82_45, -3.96_86], [-1.52_64, -2.81_26, -2.93_16]], ]) else: snake_case_ = logits.argmax(-1).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx]) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1E-2) # finally, save model and image processor logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''') Path(a_).mkdir(exist_ok=a_) model.save_pretrained(a_) image_processor.save_pretrained(a_) if __name__ == "__main__": lowercase = argparse.ArgumentParser() parser.add_argument( "--model_name", default="segformer.b0.512x512.ade.160k", type=str, help="Name of the model you'd like to convert.", ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) lowercase = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
178
1
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): __snake_case : List[str] =KandinskyVaaInpaintPipeline __snake_case : Union[str, Any] =["image_embeds", "negative_image_embeds", "image", "mask_image"] __snake_case : Tuple =[ "image_embeds", "negative_image_embeds", "image", "mask_image", ] __snake_case : str =[ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __snake_case : List[str] =False @property def UpperCamelCase ( self: Tuple ): '''simple docstring''' return 32 @property def UpperCamelCase ( self: Optional[int] ): '''simple docstring''' return 32 @property def UpperCamelCase ( self: List[Any] ): '''simple docstring''' return self.time_input_dim @property def UpperCamelCase ( self: Optional[Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def UpperCamelCase ( self: Union[str, Any] ): '''simple docstring''' return 100 @property def UpperCamelCase ( self: str ): '''simple docstring''' torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE = { """in_channels""": 9, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } _SCREAMING_SNAKE_CASE = UNetaDConditionModel(**UpperCAmelCase_ ) return model @property def UpperCamelCase ( self: Union[str, Any] ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def UpperCamelCase ( self: List[Any] ): '''simple docstring''' torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE = VQModel(**self.dummy_movq_kwargs ) return model def UpperCamelCase ( self: List[Any] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = self.dummy_unet _SCREAMING_SNAKE_CASE = self.dummy_movq _SCREAMING_SNAKE_CASE = DDIMScheduler( num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=UpperCAmelCase_ , ) _SCREAMING_SNAKE_CASE = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def UpperCamelCase ( self: Dict , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[str]=0 ): '''simple docstring''' _SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( UpperCAmelCase_ ) # create init_image _SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0] _SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(UpperCAmelCase_ ) ).convert("""RGB""" ).resize((256, 256) ) # create mask _SCREAMING_SNAKE_CASE = np.ones((64, 64) , dtype=np.floataa ) _SCREAMING_SNAKE_CASE = 0 if str(UpperCAmelCase_ ).startswith("""mps""" ): _SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCAmelCase_ ) else: _SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = { """image""": init_image, """mask_image""": mask, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 2, """guidance_scale""": 4.0, """output_type""": """np""", } return inputs def UpperCamelCase ( self: str ): '''simple docstring''' _SCREAMING_SNAKE_CASE = """cpu""" _SCREAMING_SNAKE_CASE = self.get_dummy_components() _SCREAMING_SNAKE_CASE = self.pipeline_class(**UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) ) _SCREAMING_SNAKE_CASE = output.images _SCREAMING_SNAKE_CASE = pipe( **self.get_dummy_inputs(UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ , )[0] _SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] _SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] print(F'image.shape {image.shape}' ) assert image.shape == (1, 64, 64, 3) _SCREAMING_SNAKE_CASE = np.array( [0.50_77_59_03, 0.49_52_71_95, 0.48_82_45_43, 0.50_19_22_37, 0.48_64_49_06, 0.49_37_38_14, 0.4_78_05_98, 0.47_23_48_27, 0.48_32_78_48] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' def UpperCamelCase ( self: int ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __UpperCAmelCase (unittest.TestCase ): def UpperCamelCase ( self: List[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase ( self: List[str] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" ) _SCREAMING_SNAKE_CASE = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) _SCREAMING_SNAKE_CASE = np.ones((768, 768) , dtype=np.floataa ) _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = """a hat""" _SCREAMING_SNAKE_CASE = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = KandinskyVaaInpaintPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa ) _SCREAMING_SNAKE_CASE = pipeline.to(UpperCAmelCase_ ) pipeline.set_progress_bar_config(disable=UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = pipe_prior( UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() _SCREAMING_SNAKE_CASE = pipeline( image=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , image_embeds=UpperCAmelCase_ , negative_image_embeds=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , ) _SCREAMING_SNAKE_CASE = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
367
UpperCamelCase = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} UpperCamelCase = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> list[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(snake_case__ ,snake_case__ ,snake_case__ ) order.append(snake_case__ ) return order def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> list[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(snake_case__ ,snake_case__ ,snake_case__ ) return component def __lowerCamelCase ( snake_case__ ) -> list[list[int]]: """simple docstring""" _SCREAMING_SNAKE_CASE = len(snake_case__ ) * [False] _SCREAMING_SNAKE_CASE = {vert: [] for vert in range(len(snake_case__ ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(snake_case__ ) _SCREAMING_SNAKE_CASE = [] for i, was_visited in enumerate(snake_case__ ): if not was_visited: order += topology_sort(snake_case__ ,snake_case__ ,snake_case__ ) _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = len(snake_case__ ) * [False] for i in range(len(snake_case__ ) ): _SCREAMING_SNAKE_CASE = order[len(snake_case__ ) - i - 1] if not visited[vert]: _SCREAMING_SNAKE_CASE = find_components(snake_case__ ,snake_case__ ,snake_case__ ) components_list.append(snake_case__ ) return components_list
125
0
import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def lowerCAmelCase__ ( ) -> Optional[Any]: '''simple docstring''' raise RuntimeError("CUDA out of memory." ) class a__ ( nn.Module ): """simple docstring""" def __init__( self ) -> Dict: '''simple docstring''' super().__init__() A__ = nn.Linear(3 , 4 ) A__ = nn.BatchNormad(4 ) A__ = nn.Linear(4 , 5 ) def UpperCamelCase ( self , lowercase ) -> int: '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(lowercase ) ) ) class a__ ( unittest.TestCase ): """simple docstring""" def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' A__ = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(lowercase ): nonlocal batch_sizes batch_sizes.append(lowercase ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(lowercase , [128, 64, 32, 16, 8] ) def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(lowercase , lowercase ): nonlocal batch_sizes batch_sizes.append(lowercase ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga A__ , A__ = mock_training_loop_function("hello" ) self.assertListEqual(lowercase , [128, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, "hello"] ) def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(lowercase ): pass with self.assertRaises(lowercase ) as cm: mock_training_loop_function() self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] ) def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(lowercase ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(lowercase ) as cm: mock_training_loop_function() self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] ) def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(lowercase , lowercase , lowercase ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(lowercase ) as cm: mock_training_loop_function(128 , "hello" , "world" ) self.assertIn("Batch size was passed into `f`" , cm.exception.args[0] ) self.assertIn("`f(arg1='hello', arg2='world')" , cm.exception.args[0] ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(lowercase ): raise ValueError("Oops, we had an error!" ) with self.assertRaises(lowercase ) as cm: mock_training_loop_function() self.assertIn("Oops, we had an error!" , cm.exception.args[0] ) @require_cuda def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = torch.cuda.memory_allocated() A__ = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , lowercase ) A__ = release_memory(lowercase ) self.assertEqual(torch.cuda.memory_allocated() , lowercase )
68
import flax.linen as nn import jax import jax.numpy as jnp class lowerCAmelCase ( nn.Module ): UpperCAmelCase__ = 42 UpperCAmelCase__ = jnp.floataa def A_ ( self : Any ) -> Any: lowerCamelCase__ : str = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : int , UpperCAmelCase : Dict ) -> Optional[Any]: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = hidden_states.shape lowerCamelCase__ : Union[str, Any] = jax.image.resize( UpperCAmelCase , shape=(batch, height * 2, width * 2, channels) , method='nearest' , ) lowerCamelCase__ : Optional[Any] = self.conv(UpperCAmelCase ) return hidden_states class lowerCAmelCase ( nn.Module ): UpperCAmelCase__ = 42 UpperCAmelCase__ = jnp.floataa def A_ ( self : List[str] ) -> int: lowerCamelCase__ : Tuple = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : str , UpperCAmelCase : Union[str, Any] ) -> Optional[Any]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) lowerCamelCase__ : Optional[Any] = self.conv(UpperCAmelCase ) return hidden_states class lowerCAmelCase ( nn.Module ): UpperCAmelCase__ = 42 UpperCAmelCase__ = None UpperCAmelCase__ = 0.0 UpperCAmelCase__ = None UpperCAmelCase__ = jnp.floataa def A_ ( self : List[str] ) -> Union[str, Any]: lowerCamelCase__ : Optional[Any] = self.in_channels if self.out_channels is None else self.out_channels lowerCamelCase__ : Tuple = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) lowerCamelCase__ : int = nn.Conv( UpperCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) lowerCamelCase__ : Union[str, Any] = nn.Dense(UpperCAmelCase , dtype=self.dtype ) lowerCamelCase__ : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) lowerCamelCase__ : List[Any] = nn.Dropout(self.dropout_prob ) lowerCamelCase__ : Tuple = nn.Conv( UpperCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) lowerCamelCase__ : Optional[Any] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut lowerCamelCase__ : Union[str, Any] = None if use_nin_shortcut: lowerCamelCase__ : Dict = nn.Conv( UpperCAmelCase , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , ) def __call__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=True ) -> Optional[int]: lowerCamelCase__ : Union[str, Any] = hidden_states lowerCamelCase__ : List[Any] = self.norma(UpperCAmelCase ) lowerCamelCase__ : List[Any] = nn.swish(UpperCAmelCase ) lowerCamelCase__ : Any = self.conva(UpperCAmelCase ) lowerCamelCase__ : Optional[Any] = self.time_emb_proj(nn.swish(UpperCAmelCase ) ) lowerCamelCase__ : List[str] = jnp.expand_dims(jnp.expand_dims(UpperCAmelCase , 1 ) , 1 ) lowerCamelCase__ : List[str] = hidden_states + temb lowerCamelCase__ : Optional[Any] = self.norma(UpperCAmelCase ) lowerCamelCase__ : List[str] = nn.swish(UpperCAmelCase ) lowerCamelCase__ : Optional[int] = self.dropout(UpperCAmelCase , UpperCAmelCase ) lowerCamelCase__ : str = self.conva(UpperCAmelCase ) if self.conv_shortcut is not None: lowerCamelCase__ : Dict = self.conv_shortcut(UpperCAmelCase ) return hidden_states + residual
50
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowerCAmelCase : Any = logging.get_logger(__name__) if is_vision_available(): import PIL class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["pixel_values"] def __init__( self , _a = True , _a = None , _a = PILImageResampling.BICUBIC , _a = True , _a = None , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = None , _a = True , **_a , ): """simple docstring""" super().__init__(**_a ) lowerCamelCase = size if size is not None else {"""shortest_edge""": 224} lowerCamelCase = get_size_dict(_a , default_to_square=_a ) lowerCamelCase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} lowerCamelCase = get_size_dict(_a , default_to_square=_a , param_name="""crop_size""" ) lowerCamelCase = do_resize lowerCamelCase = size lowerCamelCase = resample lowerCamelCase = do_center_crop lowerCamelCase = crop_size lowerCamelCase = do_rescale lowerCamelCase = rescale_factor lowerCamelCase = do_normalize lowerCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowerCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD lowerCamelCase = do_convert_rgb def _lowerCAmelCase ( self , _a , _a , _a = PILImageResampling.BICUBIC , _a = None , **_a , ): """simple docstring""" lowerCamelCase = get_size_dict(_a , default_to_square=_a ) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) lowerCamelCase = get_resize_output_image_size(_a , size=size["""shortest_edge"""] , default_to_square=_a ) return resize(_a , size=_a , resample=_a , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a , _a = None , **_a , ): """simple docstring""" lowerCamelCase = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' ) return center_crop(_a , size=(size["""height"""], size["""width"""]) , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a , _a = None , **_a , ): """simple docstring""" return rescale(_a , scale=_a , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a , _a , _a = None , **_a , ): """simple docstring""" return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ): """simple docstring""" lowerCamelCase = do_resize if do_resize is not None else self.do_resize lowerCamelCase = size if size is not None else self.size lowerCamelCase = get_size_dict(_a , param_name="""size""" , default_to_square=_a ) lowerCamelCase = resample if resample is not None else self.resample lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCamelCase = crop_size if crop_size is not None else self.crop_size lowerCamelCase = get_size_dict(_a , param_name="""crop_size""" , default_to_square=_a ) lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase = image_mean if image_mean is not None else self.image_mean lowerCamelCase = image_std if image_std is not None else self.image_std lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowerCamelCase = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowerCamelCase = [convert_to_rgb(_a ) for image in images] # All transformations expect numpy arrays. lowerCamelCase = [to_numpy_array(_a ) for image in images] if do_resize: lowerCamelCase = [self.resize(image=_a , size=_a , resample=_a ) for image in images] if do_center_crop: lowerCamelCase = [self.center_crop(image=_a , size=_a ) for image in images] if do_rescale: lowerCamelCase = [self.rescale(image=_a , scale=_a ) for image in images] if do_normalize: lowerCamelCase = [self.normalize(image=_a , mean=_a , std=_a ) for image in images] lowerCamelCase = [to_channel_dimension_format(_a , _a ) for image in images] lowerCamelCase = {"""pixel_values""": images} return BatchFeature(data=_a , tensor_type=_a )
351
"""simple docstring""" import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) lowerCAmelCase : List[str] = logging.getLogger(__name__) @dataclass(frozen=UpperCAmelCase__ ) class __magic_name__ : '''simple docstring''' __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None @dataclass(frozen=UpperCAmelCase__ ) class __magic_name__ : '''simple docstring''' __UpperCamelCase = 42 __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None if is_torch_available(): import torch from torch.utils.data import Dataset class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = 42 def __init__( self , _a , _a , _a , _a = None , _a=False , _a = False , ): """simple docstring""" lowerCamelCase = hans_processors[task]() lowerCamelCase = os.path.join( _a , """cached_{}_{}_{}_{}""".format( """dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(_a ) , _a , ) , ) lowerCamelCase = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) lowerCamelCase , lowerCamelCase = label_list[2], label_list[1] lowerCamelCase = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowerCamelCase = cached_features_file + """.lock""" with FileLock(_a ): if os.path.exists(_a ) and not overwrite_cache: logger.info(f'Loading features from cached file {cached_features_file}' ) lowerCamelCase = torch.load(_a ) else: logger.info(f'Creating features from dataset file at {data_dir}' ) lowerCamelCase = ( processor.get_dev_examples(_a ) if evaluate else processor.get_train_examples(_a ) ) logger.info("""Training examples: %s""" , len(_a ) ) lowerCamelCase = hans_convert_examples_to_features(_a , _a , _a , _a ) logger.info("""Saving features into cached file %s""" , _a ) torch.save(self.features , _a ) def __len__( self ): """simple docstring""" return len(self.features ) def __getitem__( self , _a ): """simple docstring""" return self.features[i] def _lowerCAmelCase ( self ): """simple docstring""" return self.label_list if is_tf_available(): import tensorflow as tf class __magic_name__ : '''simple docstring''' __UpperCamelCase = 42 def __init__( self , _a , _a , _a , _a = 128 , _a=False , _a = False , ): """simple docstring""" lowerCamelCase = hans_processors[task]() lowerCamelCase = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) lowerCamelCase , lowerCamelCase = label_list[2], label_list[1] lowerCamelCase = label_list lowerCamelCase = processor.get_dev_examples(_a ) if evaluate else processor.get_train_examples(_a ) lowerCamelCase = hans_convert_examples_to_features(_a , _a , _a , _a ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ): if ex_index % 10_000 == 0: logger.info("""Writing example %d of %d""" % (ex_index, len(_a )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) lowerCamelCase = tf.data.Dataset.from_generator( _a , ( { """example_id""": tf.intaa, """input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa, }, tf.intaa, ) , ( { """example_id""": tf.TensorShape([] ), """input_ids""": tf.TensorShape([None, None] ), """attention_mask""": tf.TensorShape([None, None] ), """token_type_ids""": tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def _lowerCAmelCase ( self ): """simple docstring""" return self.dataset def __len__( self ): """simple docstring""" return len(self.features ) def __getitem__( self , _a ): """simple docstring""" return self.features[i] def _lowerCAmelCase ( self ): """simple docstring""" return self.label_list class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def _lowerCAmelCase ( self , _a ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(_a , """heuristics_train_set.txt""" ) ) , """train""" ) def _lowerCAmelCase ( self , _a ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(_a , """heuristics_evaluation_set.txt""" ) ) , """dev""" ) def _lowerCAmelCase ( self ): """simple docstring""" return ["contradiction", "entailment", "neutral"] def _lowerCAmelCase ( self , _a , _a ): """simple docstring""" lowerCamelCase = [] for i, line in enumerate(_a ): if i == 0: continue lowerCamelCase = """%s-%s""" % (set_type, line[0]) lowerCamelCase = line[5] lowerCamelCase = line[6] lowerCamelCase = line[7][2:] if line[7].startswith("""ex""" ) else line[7] lowerCamelCase = line[0] examples.append(InputExample(guid=_a , text_a=_a , text_b=_a , label=_a , pairID=_a ) ) return examples def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> Tuple: lowerCamelCase = {label: i for i, label in enumerate(snake_case__ )} lowerCamelCase = [] for ex_index, example in tqdm.tqdm(enumerate(snake_case__ ) , desc="""convert examples to features""" ): if ex_index % 1_00_00 == 0: logger.info("""Writing example %d""" % (ex_index) ) lowerCamelCase = tokenizer( example.text_a , example.text_b , add_special_tokens=snake_case__ , max_length=snake_case__ , padding="""max_length""" , truncation=snake_case__ , return_overflowing_tokens=snake_case__ , ) lowerCamelCase = label_map[example.label] if example.label in label_map else 0 lowerCamelCase = int(example.pairID ) features.append(InputFeatures(**snake_case__ , label=snake_case__ , pairID=snake_case__ ) ) for i, example in enumerate(examples[:5] ): logger.info("""*** Example ***""" ) logger.info(F'guid: {example}' ) logger.info(F'features: {features[i]}' ) return features lowerCAmelCase : List[str] = { """hans""": 3, } lowerCAmelCase : str = { """hans""": HansProcessor, }
168
0
'''simple docstring''' _UpperCamelCase = ''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' _UpperCamelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] _UpperCamelCase = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
254
'''simple docstring''' from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def lowercase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ): """simple docstring""" if version.parse(hfh.__version__ ).release < version.parse("""0.11.0""" ).release: # old versions of hfh don't url-encode the file path __UpperCAmelCase : Optional[Any] = quote(lowerCAmelCase__ ) return hfh.hf_hub_url(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="""dataset""" , revision=lowerCAmelCase__ )
254
1
'''simple docstring''' from __future__ import annotations from math import pow, sqrt def UpperCAmelCase ( a_ , a_ , a_ ) -> Union[str, Any]: """simple docstring""" if (resistance, reactance, impedance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if resistance == 0: return {"resistance": sqrt(pow(a_ , 2 ) - pow(a_ , 2 ) )} elif reactance == 0: return {"reactance": sqrt(pow(a_ , 2 ) - pow(a_ , 2 ) )} elif impedance == 0: return {"impedance": sqrt(pow(a_ , 2 ) + pow(a_ , 2 ) )} else: raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
356
'''simple docstring''' from statistics import mean, stdev def UpperCAmelCase ( a_ , a_ = 3 ) -> list: """simple docstring""" A_ : Tuple = min(a_ ) A_ : Union[str, Any] = max(a_ ) # normalize data return [round((x - x_min) / (x_max - x_min) , a_ ) for x in data] def UpperCAmelCase ( a_ , a_ = 3 ) -> list: """simple docstring""" A_ : List[str] = mean(a_ ) A_ : List[str] = stdev(a_ ) # standardize data return [round((x - mu) / (sigma) , a_ ) for x in data]
164
0
"""simple docstring""" import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class a : def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=13 , __SCREAMING_SNAKE_CASE : List[Any]=30 , __SCREAMING_SNAKE_CASE : List[str]=2 , __SCREAMING_SNAKE_CASE : List[Any]=3 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=32 , __SCREAMING_SNAKE_CASE : Optional[Any]=5 , __SCREAMING_SNAKE_CASE : Tuple=4 , __SCREAMING_SNAKE_CASE : str=37 , __SCREAMING_SNAKE_CASE : str="gelu" , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=10 , __SCREAMING_SNAKE_CASE : int=0.02 , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Optional[int]=2 , ) -> str: lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = image_size lowerCamelCase_ = patch_size lowerCamelCase_ = num_channels lowerCamelCase_ = is_training lowerCamelCase_ = use_labels lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = scope lowerCamelCase_ = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowerCamelCase_ = (image_size // patch_size) ** 2 lowerCamelCase_ = num_patches + 2 def UpperCamelCase ( self : List[Any] ) -> Optional[Any]: lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = self.get_config() return config, pixel_values, labels def UpperCamelCase ( self : Optional[int] ) -> Dict: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def UpperCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]: lowerCamelCase_ = DeiTModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int ) -> List[Any]: lowerCamelCase_ = DeiTForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase_ = 1 lowerCamelCase_ = DeiTForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase_ = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str ) -> Dict: lowerCamelCase_ = self.type_sequence_label_size lowerCamelCase_ = DeiTForImageClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase_ = 1 lowerCamelCase_ = DeiTForImageClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase_ = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase ( self : str ) -> int: lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class a ( __snake_case , __snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE : Tuple = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE : List[str] = ( { """feature-extraction""": DeiTModel, """image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE : Optional[Any] = False SCREAMING_SNAKE_CASE : Tuple = False SCREAMING_SNAKE_CASE : int = False def UpperCamelCase ( self : Union[str, Any] ) -> str: lowerCamelCase_ = DeiTModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def UpperCamelCase ( self : Dict ) -> int: self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds' ) def UpperCamelCase ( self : List[Any] ) -> Optional[int]: pass def UpperCamelCase ( self : Optional[Any] ) -> Tuple: lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = model_class(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) ) def UpperCamelCase ( self : str ) -> Union[str, Any]: lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = model_class(__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ = [*signature.parameters.keys()] lowerCamelCase_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def UpperCamelCase ( self : Dict ) -> Tuple: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def UpperCamelCase ( self : Tuple ) -> int: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE ) def UpperCamelCase ( self : Dict ) -> Optional[Any]: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE ) def UpperCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=False ) -> Any: lowerCamelCase_ = super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def UpperCamelCase ( self : str ) -> int: if not self.model_tester.is_training: return lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__SCREAMING_SNAKE_CASE ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue lowerCamelCase_ = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.train() lowerCamelCase_ = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = model(**__SCREAMING_SNAKE_CASE ).loss loss.backward() def UpperCamelCase ( self : Optional[Any] ) -> int: lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return lowerCamelCase_ = False lowerCamelCase_ = True for model_class in self.all_model_classes: if model_class in get_values(__SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue lowerCamelCase_ = model_class(__SCREAMING_SNAKE_CASE ) model.gradient_checkpointing_enable() model.to(__SCREAMING_SNAKE_CASE ) model.train() lowerCamelCase_ = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = model(**__SCREAMING_SNAKE_CASE ).loss loss.backward() def UpperCamelCase ( self : Union[str, Any] ) -> Tuple: lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = [ {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float}, {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long}, {'title': 'regression', 'num_labels': 1, 'dtype': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__SCREAMING_SNAKE_CASE ), *get_values(__SCREAMING_SNAKE_CASE ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ): lowerCamelCase_ = problem_type['title'] lowerCamelCase_ = problem_type['num_labels'] lowerCamelCase_ = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.train() lowerCamelCase_ = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) if problem_type["num_labels"] > 1: lowerCamelCase_ = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] ) lowerCamelCase_ = inputs['labels'].to(problem_type['dtype'] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__SCREAMING_SNAKE_CASE ) as warning_list: lowerCamelCase_ = model(**__SCREAMING_SNAKE_CASE ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F'''Something is going wrong in the regression problem: intercepted {w.message}''' ) loss.backward() @slow def UpperCamelCase ( self : List[str] ) -> Tuple: for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = DeiTModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( ) -> Any: lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class a ( unittest.TestCase ): @cached_property def UpperCamelCase ( self : Union[str, Any] ) -> Tuple: return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' ) if is_vision_available() else None ) @slow def UpperCamelCase ( self : Optional[Any] ) -> int: lowerCamelCase_ = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to( __SCREAMING_SNAKE_CASE ) lowerCamelCase_ = self.default_image_processor lowerCamelCase_ = prepare_img() lowerCamelCase_ = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): lowerCamelCase_ = model(**__SCREAMING_SNAKE_CASE ) # verify the logits lowerCamelCase_ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE ) lowerCamelCase_ = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def UpperCamelCase ( self : str ) -> Dict: lowerCamelCase_ = DeiTModel.from_pretrained( 'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' ) lowerCamelCase_ = self.default_image_processor lowerCamelCase_ = prepare_img() lowerCamelCase_ = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ) lowerCamelCase_ = inputs.pixel_values.to(__SCREAMING_SNAKE_CASE ) # forward pass to make sure inference works in fp16 with torch.no_grad(): lowerCamelCase_ = model(__SCREAMING_SNAKE_CASE )
183
"""simple docstring""" from math import pow, sqrt def lowerCamelCase__ ( *_lowerCamelCase : float ) -> bool: lowerCamelCase_ = len(_lowerCamelCase ) > 0 and all(value > 0.0 for value in values ) return result def lowerCamelCase__ ( _lowerCamelCase : float , _lowerCamelCase : float ) -> float | ValueError: return ( round(sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(_lowerCamelCase , _lowerCamelCase ) else ValueError('Input Error: Molar mass values must greater than 0.' ) ) def lowerCamelCase__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ) -> float | ValueError: return ( round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) else ValueError( 'Input Error: Molar mass and effusion rate values must greater than 0.' ) ) def lowerCamelCase__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ) -> float | ValueError: return ( round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) else ValueError( 'Input Error: Molar mass and effusion rate values must greater than 0.' ) ) def lowerCamelCase__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ) -> float | ValueError: return ( round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 ) if validate(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) else ValueError( 'Input Error: Molar mass and effusion rate values must greater than 0.' ) ) def lowerCamelCase__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ) -> float | ValueError: return ( round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 ) if validate(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) else ValueError( 'Input Error: Molar mass and effusion rate values must greater than 0.' ) )
183
1
'''simple docstring''' import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class __magic_name__ ( unittest.TestCase ): def __lowercase ( self : int ,_UpperCAmelCase : int ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[Any] ): self.assertEqual(len(_UpperCAmelCase ) ,len(_UpperCAmelCase ) ) for a, b in zip(_UpperCAmelCase ,_UpperCAmelCase ): self.assertAlmostEqual(_UpperCAmelCase ,_UpperCAmelCase ,delta=_UpperCAmelCase ) def __lowercase ( self : int ): _a : int = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(_UpperCAmelCase ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step ,3 ) self.assertEqual(len(accumulator.gradients ) ,1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() ,[-2.0, 5.0] ,tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step ,0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() ,[0.0, 0.0] ,tol=1E-2 ) def __lowercase ( self : Any ): _a : int = None ops.enable_eager_execution_internal() _a : Optional[int] = tf.config.list_physical_devices('CPU' ) if len(_UpperCAmelCase ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] ,[tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) _a : Tuple = tf.config.list_logical_devices(device_type='CPU' ) _a : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): _a : Tuple = GradientAccumulator() _a : List[Any] = tf.Variable([4.0, 3.0] ) _a , _a : Dict = create_optimizer(5E-5 ,10 ,5 ) _a : Tuple = tf.Variable([0.0, 0.0] ,trainable=_UpperCAmelCase ) def accumulate_on_replica(_UpperCAmelCase : str ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients ,[variable] ) ) ) @tf.function def accumulate(_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[int] ): with strategy.scope(): _a : Union[str, Any] = strategy.experimental_local_results(_UpperCAmelCase ) local_variables[0].assign(_UpperCAmelCase ) local_variables[1].assign(_UpperCAmelCase ) strategy.run(_UpperCAmelCase ,args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(_UpperCAmelCase ) def _check_local_values(_UpperCAmelCase : Tuple ,_UpperCAmelCase : List[str] ): _a : List[Any] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() ,_UpperCAmelCase ,tol=1E-2 ) self.assertListAlmostEqual(values[1].value() ,_UpperCAmelCase ,tol=1E-2 ) accumulate([1.0, 2.0] ,[-1.0, 1.0] ) accumulate([3.0, -1.0] ,[-1.0, -1.0] ) accumulate([-2.0, 2.0] ,[3.0, -2.0] ) self.assertEqual(accumulator.step ,3 ) _check_local_values([2.0, 3.0] ,[1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() ,[4.0, 3.0] ,tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step ,0 ) _check_local_values([0.0, 0.0] ,[0.0, 0.0] )
107
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowerCAmelCase = { '''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''], '''configuration_data2vec_text''': [ '''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecTextConfig''', '''Data2VecTextOnnxConfig''', ], '''configuration_data2vec_vision''': [ '''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecVisionConfig''', '''Data2VecVisionOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ '''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Data2VecAudioForAudioFrameClassification''', '''Data2VecAudioForCTC''', '''Data2VecAudioForSequenceClassification''', '''Data2VecAudioForXVector''', '''Data2VecAudioModel''', '''Data2VecAudioPreTrainedModel''', ] __lowerCAmelCase = [ '''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Data2VecTextForCausalLM''', '''Data2VecTextForMaskedLM''', '''Data2VecTextForMultipleChoice''', '''Data2VecTextForQuestionAnswering''', '''Data2VecTextForSequenceClassification''', '''Data2VecTextForTokenClassification''', '''Data2VecTextModel''', '''Data2VecTextPreTrainedModel''', ] __lowerCAmelCase = [ '''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Data2VecVisionForImageClassification''', '''Data2VecVisionForMaskedImageModeling''', '''Data2VecVisionForSemanticSegmentation''', '''Data2VecVisionModel''', '''Data2VecVisionPreTrainedModel''', ] if is_tf_available(): __lowerCAmelCase = [ '''TFData2VecVisionForImageClassification''', '''TFData2VecVisionForSemanticSegmentation''', '''TFData2VecVisionModel''', '''TFData2VecVisionPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
107
1
from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case : Any = logging.get_logger(__name__) snake_case : Dict = { "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json", } class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = "roc_bert" def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-12 , _lowerCamelCase=True , _lowerCamelCase=0 , _lowerCamelCase="absolute" , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=768 , _lowerCamelCase=910 , _lowerCamelCase=512 , _lowerCamelCase=2_4858 , _lowerCamelCase=True , **_lowerCamelCase , ): a :List[str] = vocab_size a :Any = max_position_embeddings a :Dict = hidden_size a :int = num_hidden_layers a :Optional[Any] = num_attention_heads a :Optional[int] = intermediate_size a :Dict = hidden_act a :Tuple = hidden_dropout_prob a :Optional[int] = attention_probs_dropout_prob a :Dict = initializer_range a :Optional[Any] = type_vocab_size a :str = layer_norm_eps a :Tuple = use_cache a :Optional[int] = enable_pronunciation a :Union[str, Any] = enable_shape a :List[str] = pronunciation_embed_dim a :List[str] = pronunciation_vocab_size a :int = shape_embed_dim a :Optional[int] = shape_vocab_size a :Optional[Any] = concat_input a :Dict = position_embedding_type a :Union[str, Any] = classifier_dropout super().__init__(pad_token_id=_lowerCamelCase , **_lowerCamelCase )
94
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") snake_case_ : List[str] = logging.getLogger(__name__) @dataclass class __a : __a : Optional[str] = field( default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} ) __a : Optional[str] = field( default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , ) __a : int = field( default=1_024 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __a : bool = field( default=lowerCamelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) __a : bool = field( default=lowerCamelCase , metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) } , ) __a : Optional[int] = field( default=lowerCamelCase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) __a : Optional[int] = field( default=lowerCamelCase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) __a : Optional[int] = field( default=lowerCamelCase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) } , ) __a : Optional[str] = field( default=lowerCamelCase , metadata={"help": "A csv or a json file containing the training data."} ) __a : Optional[str] = field( default=lowerCamelCase , metadata={"help": "A csv or a json file containing the validation data."} ) __a : Optional[str] = field(default=lowerCamelCase , metadata={"help": "A csv or a json file containing the test data."} ) def UpperCAmelCase__ ( self : Dict ) -> Any: """simple docstring""" if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' ) else: UpperCAmelCase_ : Dict = self.train_file.split('''.''' )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." UpperCAmelCase_ : Union[str, Any] = self.validation_file.split('''.''' )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." @dataclass class __a : __a : str = field( default=lowerCamelCase , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) __a : Optional[str] = field( default=lowerCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) __a : Optional[str] = field( default=lowerCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) __a : Optional[str] = field( default=lowerCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __a : bool = field( default=lowerCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) __a : str = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) __a : bool = field( default=lowerCamelCase , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) def lowerCamelCase_ ( ) -> List[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCAmelCase_ : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], ) UpperCAmelCase_ : List[str] = training_args.get_process_log_level() logger.setLevel(SCREAMING_SNAKE_CASE__ ) datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ ) transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. UpperCAmelCase_ : Tuple = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCAmelCase_ : Optional[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. UpperCAmelCase_ : List[Any] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. UpperCAmelCase_ : Dict = {'''train''': data_args.train_file, '''validation''': data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: UpperCAmelCase_ : Dict = data_args.train_file.split('''.''' )[-1] UpperCAmelCase_ : Union[str, Any] = data_args.test_file.split('''.''' )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." UpperCAmelCase_ : int = data_args.test_file else: raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' ) for key in data_files.keys(): logger.info(F"""load a local file for {key}: {data_files[key]}""" ) if data_args.train_file.endswith('''.csv''' ): # Loading a dataset from local csv files UpperCAmelCase_ : List[Any] = load_dataset('''csv''', data_files=SCREAMING_SNAKE_CASE__, cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files UpperCAmelCase_ : int = load_dataset('''json''', data_files=SCREAMING_SNAKE_CASE__, cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels UpperCAmelCase_ : Optional[Any] = raw_datasets['''train'''].features['''label'''].names UpperCAmelCase_ : List[str] = len(SCREAMING_SNAKE_CASE__ ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCAmelCase_ : Tuple = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=SCREAMING_SNAKE_CASE__, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # load tapex tokenizer UpperCAmelCase_ : str = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=SCREAMING_SNAKE_CASE__, ) UpperCAmelCase_ : Union[str, Any] = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=SCREAMING_SNAKE_CASE__, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Padding strategy if data_args.pad_to_max_length: UpperCAmelCase_ : Optional[int] = '''max_length''' else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch UpperCAmelCase_ : Dict = False # Some models have set the order of the labels to use, so let's make sure we do use it. UpperCAmelCase_ : Tuple = {'''Refused''': 0, '''Entailed''': 1} UpperCAmelCase_ : Tuple = {0: '''Refused''', 1: '''Entailed'''} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) UpperCAmelCase_ : int = min(data_args.max_seq_length, tokenizer.model_max_length ) def preprocess_tabfact_function(SCREAMING_SNAKE_CASE__ : Optional[int] ): # Tokenize the texts def _convert_table_text_to_pandas(SCREAMING_SNAKE_CASE__ : Tuple ): UpperCAmelCase_ : List[str] = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )] UpperCAmelCase_ : Any = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0] ) return _table_pd UpperCAmelCase_ : Optional[Any] = examples['''statement'''] UpperCAmelCase_ : Union[str, Any] = list(map(_convert_table_text_to_pandas, examples['''table_text'''] ) ) UpperCAmelCase_ : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, padding=SCREAMING_SNAKE_CASE__, max_length=SCREAMING_SNAKE_CASE__, truncation=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : List[Any] = examples['''label'''] return result with training_args.main_process_first(desc='''dataset map pre-processing''' ): UpperCAmelCase_ : List[str] = raw_datasets.map( SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, load_from_cache_file=not data_args.overwrite_cache, desc='''Running tokenizer on dataset''', ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError('''--do_train requires a train dataset''' ) UpperCAmelCase_ : Any = raw_datasets['''train'''] if data_args.max_train_samples is not None: UpperCAmelCase_ : Dict = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError('''--do_eval requires a validation dataset''' ) UpperCAmelCase_ : str = raw_datasets['''validation'''] if data_args.max_eval_samples is not None: UpperCAmelCase_ : Any = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError('''--do_predict requires a test dataset''' ) UpperCAmelCase_ : Dict = raw_datasets['''test'''] if data_args.max_predict_samples is not None: UpperCAmelCase_ : List[str] = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(SCREAMING_SNAKE_CASE__ ) ), 3 ): logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(SCREAMING_SNAKE_CASE__ : EvalPrediction ): UpperCAmelCase_ : Any = p.predictions[0] if isinstance(p.predictions, SCREAMING_SNAKE_CASE__ ) else p.predictions UpperCAmelCase_ : Optional[int] = np.argmax(SCREAMING_SNAKE_CASE__, axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: UpperCAmelCase_ : Optional[Any] = default_data_collator elif training_args.fpaa: UpperCAmelCase_ : str = DataCollatorWithPadding(SCREAMING_SNAKE_CASE__, pad_to_multiple_of=8 ) else: UpperCAmelCase_ : List[Any] = None # Initialize our Trainer UpperCAmelCase_ : int = Trainer( model=SCREAMING_SNAKE_CASE__, args=SCREAMING_SNAKE_CASE__, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=SCREAMING_SNAKE_CASE__, tokenizer=SCREAMING_SNAKE_CASE__, data_collator=SCREAMING_SNAKE_CASE__, ) # Training if training_args.do_train: UpperCAmelCase_ : Dict = None if training_args.resume_from_checkpoint is not None: UpperCAmelCase_ : Union[str, Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCAmelCase_ : Optional[int] = last_checkpoint UpperCAmelCase_ : Dict = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Any = train_result.metrics UpperCAmelCase_ : Tuple = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE__ ) ) UpperCAmelCase_ : List[Any] = min(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics('''train''', SCREAMING_SNAKE_CASE__ ) trainer.save_metrics('''train''', SCREAMING_SNAKE_CASE__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) UpperCAmelCase_ : Union[str, Any] = trainer.evaluate(eval_dataset=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Tuple = min(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) ) trainer.log_metrics('''eval''', SCREAMING_SNAKE_CASE__ ) trainer.save_metrics('''eval''', SCREAMING_SNAKE_CASE__ ) if training_args.do_predict: logger.info('''*** Predict ***''' ) # Removing the `label` columns because it contains -1 and Trainer won't like that. UpperCAmelCase_ : Optional[int] = predict_dataset.remove_columns('''label''' ) UpperCAmelCase_ : Union[str, Any] = trainer.predict(SCREAMING_SNAKE_CASE__, metric_key_prefix='''predict''' ).predictions UpperCAmelCase_ : Any = np.argmax(SCREAMING_SNAKE_CASE__, axis=1 ) UpperCAmelCase_ : int = os.path.join(training_args.output_dir, '''predict_results_tabfact.txt''' ) if trainer.is_world_process_zero(): with open(SCREAMING_SNAKE_CASE__, '''w''' ) as writer: logger.info('''***** Predict Results *****''' ) writer.write('''index\tprediction\n''' ) for index, item in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase_ : Dict = label_list[item] writer.write(F"""{index}\t{item}\n""" ) UpperCAmelCase_ : Optional[int] = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''} if training_args.push_to_hub: trainer.push_to_hub(**SCREAMING_SNAKE_CASE__ ) else: trainer.create_model_card(**SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
125
0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: lowercase__ = None lowercase__ = logging.get_logger(__name__) lowercase__ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} lowercase__ = { """vocab_file""": { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""", """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model""" ), }, """tokenizer_file""": { """google/bigbird-roberta-base""": ( """https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json""" ), """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json""" ), }, } lowercase__ = { """google/bigbird-roberta-base""": 4096, """google/bigbird-roberta-large""": 4096, """google/bigbird-base-trivia-itc""": 4096, } lowercase__ = """▁""" class __lowerCamelCase ( A__ ): '''simple docstring''' a_ : Union[str, Any] = VOCAB_FILES_NAMES a_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP a_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ : str = BigBirdTokenizer a_ : Optional[int] = ["""input_ids""", """attention_mask"""] a_ : List[int] = [] def __init__( self : str , a_ : Optional[Any]=None , a_ : Tuple=None , a_ : Any="<unk>" , a_ : Optional[Any]="<s>" , a_ : str="</s>" , a_ : Any="<pad>" , a_ : int="[SEP]" , a_ : str="[MASK]" , a_ : str="[CLS]" , **a_ : List[str] , ): lowerCAmelCase_ : List[str] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else bos_token lowerCAmelCase_ : List[str] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else eos_token lowerCAmelCase_ : Any = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else unk_token lowerCAmelCase_ : List[str] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else pad_token lowerCAmelCase_ : Any = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else cls_token lowerCAmelCase_ : int = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else sep_token # Mask token behave like a normal word, i.e. include the space before it lowerCAmelCase_ : int = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token super().__init__( a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , **a_ , ) lowerCAmelCase_ : Any = vocab_file lowerCAmelCase_ : Union[str, Any] = False if not self.vocab_file else True def lowerCamelCase ( self : Union[str, Any] , a_ : List[int] , a_ : Optional[List[int]] = None ): lowerCAmelCase_ : List[Any] = [self.sep_token_id] lowerCAmelCase_ : Dict = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCamelCase ( self : List[Any] , a_ : List[int] , a_ : Optional[List[int]] = None , a_ : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(a_ )) + [1] return [1] + ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1] def lowerCamelCase ( self : List[Any] , a_ : List[int] , a_ : Optional[List[int]] = None ): lowerCAmelCase_ : Optional[Any] = [self.sep_token_id] lowerCAmelCase_ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase ( self : str , a_ : str , a_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(a_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCAmelCase_ : Union[str, Any] = os.path.join( a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ): copyfile(self.vocab_file , a_ ) return (out_vocab_file,)
161
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , a_ : List[str] , a_ : Tuple=7 , a_ : Any=3 , a_ : Union[str, Any]=18 , a_ : List[str]=30 , a_ : List[str]=4_00 , a_ : str=True , a_ : Tuple=None , a_ : str=True , a_ : Optional[int]=None , ): lowerCAmelCase_ : Any = size if size is not None else {"shortest_edge": 20} lowerCAmelCase_ : Any = crop_size if crop_size is not None else {"height": 18, "width": 18} lowerCAmelCase_ : int = parent lowerCAmelCase_ : Dict = batch_size lowerCAmelCase_ : Any = num_channels lowerCAmelCase_ : str = image_size lowerCAmelCase_ : int = min_resolution lowerCAmelCase_ : Tuple = max_resolution lowerCAmelCase_ : str = do_resize lowerCAmelCase_ : List[Any] = size lowerCAmelCase_ : Any = do_center_crop lowerCAmelCase_ : Tuple = crop_size def lowerCamelCase ( self : List[str] ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class __lowerCamelCase ( A__ , unittest.TestCase ): '''simple docstring''' a_ : Optional[Any] = MobileNetVaImageProcessor if is_vision_available() else None def lowerCamelCase ( self : Optional[int] ): lowerCAmelCase_ : int = MobileNetVaImageProcessingTester(self ) @property def lowerCamelCase ( self : Optional[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase ( self : Tuple ): lowerCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , "do_resize" ) ) self.assertTrue(hasattr(a_ , "size" ) ) self.assertTrue(hasattr(a_ , "do_center_crop" ) ) self.assertTrue(hasattr(a_ , "crop_size" ) ) def lowerCamelCase ( self : Tuple ): lowerCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 20} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) lowerCAmelCase_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def lowerCamelCase ( self : Tuple ): pass def lowerCamelCase ( self : Any ): # Initialize image_processing lowerCAmelCase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input lowerCAmelCase_ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCAmelCase_ : List[str] = image_processing(a_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def lowerCamelCase ( self : str ): # Initialize image_processing lowerCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) # Test not batched input lowerCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCAmelCase_ : Dict = image_processing(a_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def lowerCamelCase ( self : Union[str, Any] ): # Initialize image_processing lowerCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input lowerCAmelCase_ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCAmelCase_ : str = image_processing(a_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
161
1