Dataset schema (column, type, observed value range):

  code                     string   82 to 53.2k characters
  code_codestyle           int64    0 to 721
  style_context            string   91 to 41.9k characters
  style_context_codestyle  int64    0 to 699
  label                    int64    0 to 1
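For orientation, here is a minimal sketch of loading and inspecting rows with this schema via the `datasets` library. The repository id `org/code-style-pairs` is a placeholder, not the dataset's real identifier, and the split name `train` is an assumption.

from datasets import load_dataset

# Placeholder repo id; substitute the actual dataset identifier.
ds = load_dataset("org/code-style-pairs", split="train")

row = ds[0]
print(len(row["code"]), len(row["style_context"]))            # string columns
print(row["code_codestyle"], row["style_context_codestyle"])  # int64 style ids
print(row["label"])                                           # binary label (0 or 1)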
"""simple docstring""" def _snake_case ( lowercase__ , lowercase__ ): if digit_amount > 0: return round(number - int(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) return number - int(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": print(decimal_isolate(1.53, 0)) print(decimal_isolate(35.345, 1)) print(decimal_isolate(35.345, 2)) print(decimal_isolate(35.345, 3)) print(decimal_isolate(-14.789, 3)) print(decimal_isolate(0, 2)) print(decimal_isolate(-14.123, 1)) print(decimal_isolate(-14.123, 2)) print(decimal_isolate(-14.123, 3))
code_codestyle: 630

style_context:
'''simple docstring''' import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin _A = logging.get_logger(__name__) enable_full_determinism() class SCREAMING_SNAKE_CASE_ ( snake_case , snake_case , unittest.TestCase ): __a : List[Any] = UNetaDModel __a : Union[str, Any] = '''sample''' @property def _snake_case ( self ) -> Optional[int]: '''simple docstring''' __SCREAMING_SNAKE_CASE : List[Any] = 4 __SCREAMING_SNAKE_CASE : Dict = 3 __SCREAMING_SNAKE_CASE : Union[str, Any] = (3_2, 3_2) __SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(lowercase ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([1_0] ).to(lowercase ) return {"sample": noise, "timestep": time_step} @property def _snake_case ( self ) -> Tuple: '''simple docstring''' return (3, 3_2, 3_2) @property def _snake_case ( self ) -> List[Any]: '''simple docstring''' return (3, 3_2, 3_2) def _snake_case ( self ) -> Optional[int]: '''simple docstring''' __SCREAMING_SNAKE_CASE : Union[str, Any] = { '''block_out_channels''': (3_2, 6_4), '''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''), '''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''), '''attention_head_dim''': 3, '''out_channels''': 3, '''in_channels''': 3, '''layers_per_block''': 2, '''sample_size''': 3_2, } __SCREAMING_SNAKE_CASE : int = self.dummy_input return init_dict, inputs_dict class SCREAMING_SNAKE_CASE_ ( snake_case , snake_case , unittest.TestCase ): __a : str = UNetaDModel __a : Optional[Any] = '''sample''' @property def _snake_case ( self ) -> Dict: '''simple docstring''' __SCREAMING_SNAKE_CASE : Any = 4 __SCREAMING_SNAKE_CASE : Any = 4 __SCREAMING_SNAKE_CASE : str = (3_2, 3_2) __SCREAMING_SNAKE_CASE : int = floats_tensor((batch_size, num_channels) + sizes ).to(lowercase ) __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([1_0] ).to(lowercase ) return {"sample": noise, "timestep": time_step} @property def _snake_case ( self ) -> Any: '''simple docstring''' return (4, 3_2, 3_2) @property def _snake_case ( self ) -> int: '''simple docstring''' return (4, 3_2, 3_2) def _snake_case ( self ) -> str: '''simple docstring''' __SCREAMING_SNAKE_CASE : List[str] = { '''sample_size''': 3_2, '''in_channels''': 4, '''out_channels''': 4, '''layers_per_block''': 2, '''block_out_channels''': (3_2, 6_4), '''attention_head_dim''': 3_2, '''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''), '''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''), } __SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_input return init_dict, inputs_dict def _snake_case ( self ) -> List[Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(lowercase ) __SCREAMING_SNAKE_CASE : int = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' ) def _snake_case ( self ) -> List[Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , 
output_loading_info=lowercase ) model.to(lowercase ) __SCREAMING_SNAKE_CASE : int = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' ) def _snake_case ( self ) -> List[str]: '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=lowercase ) model_accelerate.to(lowercase ) model_accelerate.eval() __SCREAMING_SNAKE_CASE : str = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) __SCREAMING_SNAKE_CASE : str = noise.to(lowercase ) __SCREAMING_SNAKE_CASE : List[str] = torch.tensor([1_0] * noise.shape[0] ).to(lowercase ) __SCREAMING_SNAKE_CASE : int = model_accelerate(lowercase , lowercase )['''sample'''] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = UNetaDModel.from_pretrained( '''fusing/unet-ldm-dummy-update''' , output_loading_info=lowercase , low_cpu_mem_usage=lowercase ) model_normal_load.to(lowercase ) model_normal_load.eval() __SCREAMING_SNAKE_CASE : List[Any] = model_normal_load(lowercase , lowercase )['''sample'''] assert torch_all_close(lowercase , lowercase , rtol=1e-3 ) def _snake_case ( self ) -> List[Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE : Union[str, Any] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' ) model.eval() model.to(lowercase ) __SCREAMING_SNAKE_CASE : Dict = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) __SCREAMING_SNAKE_CASE : Tuple = noise.to(lowercase ) __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([1_0] * noise.shape[0] ).to(lowercase ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Optional[int] = model(lowercase , lowercase ).sample __SCREAMING_SNAKE_CASE : List[str] = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off __SCREAMING_SNAKE_CASE : Dict = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] ) # fmt: on self.assertTrue(torch_all_close(lowercase , lowercase , rtol=1e-3 ) ) class SCREAMING_SNAKE_CASE_ ( snake_case , snake_case , unittest.TestCase ): __a : List[str] = UNetaDModel __a : Any = '''sample''' @property def _snake_case ( self , lowercase=(3_2, 3_2) ) -> List[Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE : Union[str, Any] = 4 __SCREAMING_SNAKE_CASE : int = 3 __SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(lowercase ) __SCREAMING_SNAKE_CASE : Any = torch.tensor(batch_size * [1_0] ).to(dtype=torch.intaa , device=lowercase ) return {"sample": noise, "timestep": time_step} @property def _snake_case ( self ) -> Union[str, Any]: '''simple docstring''' return (3, 3_2, 3_2) @property def _snake_case ( self ) -> List[str]: '''simple docstring''' return (3, 3_2, 3_2) def _snake_case ( self ) -> int: '''simple docstring''' __SCREAMING_SNAKE_CASE : List[Any] = { '''block_out_channels''': [3_2, 6_4, 6_4, 6_4], '''in_channels''': 3, '''layers_per_block''': 1, '''out_channels''': 3, '''time_embedding_type''': '''fourier''', '''norm_eps''': 1e-6, '''mid_block_scale_factor''': math.sqrt(2.0 ), '''norm_num_groups''': None, 
'''down_block_types''': [ '''SkipDownBlock2D''', '''AttnSkipDownBlock2D''', '''SkipDownBlock2D''', '''SkipDownBlock2D''', ], '''up_block_types''': [ '''SkipUpBlock2D''', '''SkipUpBlock2D''', '''AttnSkipUpBlock2D''', '''SkipUpBlock2D''', ], } __SCREAMING_SNAKE_CASE : int = self.dummy_input return init_dict, inputs_dict @slow def _snake_case ( self ) -> Any: '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(lowercase ) __SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_input __SCREAMING_SNAKE_CASE : str = floats_tensor((4, 3) + (2_5_6, 2_5_6) ).to(lowercase ) __SCREAMING_SNAKE_CASE : Union[str, Any] = noise __SCREAMING_SNAKE_CASE : Dict = model(**lowercase ) assert image is not None, "Make sure output is not None" @slow def _snake_case ( self ) -> Optional[int]: '''simple docstring''' __SCREAMING_SNAKE_CASE : str = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' ) model.to(lowercase ) __SCREAMING_SNAKE_CASE : Any = 4 __SCREAMING_SNAKE_CASE : Dict = 3 __SCREAMING_SNAKE_CASE : Tuple = (2_5_6, 2_5_6) __SCREAMING_SNAKE_CASE : Dict = torch.ones((batch_size, num_channels) + sizes ).to(lowercase ) __SCREAMING_SNAKE_CASE : List[str] = torch.tensor(batch_size * [1e-4] ).to(lowercase ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Any = model(lowercase , lowercase ).sample __SCREAMING_SNAKE_CASE : int = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off __SCREAMING_SNAKE_CASE : str = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] ) # fmt: on self.assertTrue(torch_all_close(lowercase , lowercase , rtol=1e-2 ) ) def _snake_case ( self ) -> Tuple: '''simple docstring''' __SCREAMING_SNAKE_CASE : List[Any] = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' ) model.to(lowercase ) __SCREAMING_SNAKE_CASE : int = 4 __SCREAMING_SNAKE_CASE : str = 3 __SCREAMING_SNAKE_CASE : Optional[Any] = (3_2, 3_2) __SCREAMING_SNAKE_CASE : str = torch.ones((batch_size, num_channels) + sizes ).to(lowercase ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(batch_size * [1e-4] ).to(lowercase ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Dict = model(lowercase , lowercase ).sample __SCREAMING_SNAKE_CASE : str = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] ) # fmt: on self.assertTrue(torch_all_close(lowercase , lowercase , rtol=1e-2 ) ) def _snake_case ( self ) -> str: '''simple docstring''' pass
style_context_codestyle: 158
label: 0
code:
"""Test cases for the knapsack implementation."""
import unittest

from knapsack import knapsack as k


class Test(unittest.TestCase):
    def test_base_case(self):
        # Zero capacity and a single zero-value item both yield 0.
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
code_codestyle: 718

style_context:
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging

logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
style_context_codestyle: 236
label: 0
code:
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    # Plain linear scan, used once the search window is smaller than `precision`.
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    # Iterative ternary search: narrow the window by thirds each round.
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    # Recursive variant of the same search.
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
code_codestyle: 371

style_context:
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    # Trial division: repeatedly divide out the smallest remaining factor.
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 371
label: 1

code:
import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings __UpperCAmelCase = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. 
See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n" @add_start_docstrings(__SCREAMING_SNAKE_CASE ) class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCAmelCase_ ="rag" UpperCAmelCase_ =True def __init__( self , _A=None , _A=True , _A=None , _A=None , _A=None , _A=None , _A=None , _A=" / " , _A=" // " , _A=5 , _A=300 , _A=768 , _A=8 , _A="wiki_dpr" , _A="train" , _A="compressed" , _A=None , _A=None , _A=False , _A=False , _A=0.0 , _A=True , _A=False , _A=False , _A=False , _A=True , _A=None , **_A , ) -> int: super().__init__( bos_token_id=_A , pad_token_id=_A , eos_token_id=_A , decoder_start_token_id=_A , forced_eos_token_id=_A , is_encoder_decoder=_A , prefix=_A , vocab_size=_A , **_A , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" SCREAMING_SNAKE_CASE_ = kwargs.pop('''question_encoder''' ) SCREAMING_SNAKE_CASE_ = question_encoder_config.pop('''model_type''' ) SCREAMING_SNAKE_CASE_ = kwargs.pop('''generator''' ) SCREAMING_SNAKE_CASE_ = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig SCREAMING_SNAKE_CASE_ = AutoConfig.for_model(_A , **_A ) SCREAMING_SNAKE_CASE_ = AutoConfig.for_model(_A , **_A ) SCREAMING_SNAKE_CASE_ = reduce_loss SCREAMING_SNAKE_CASE_ = label_smoothing SCREAMING_SNAKE_CASE_ = exclude_bos_score SCREAMING_SNAKE_CASE_ = do_marginalize SCREAMING_SNAKE_CASE_ = title_sep SCREAMING_SNAKE_CASE_ = doc_sep SCREAMING_SNAKE_CASE_ = n_docs SCREAMING_SNAKE_CASE_ = max_combined_length SCREAMING_SNAKE_CASE_ = dataset SCREAMING_SNAKE_CASE_ = dataset_split SCREAMING_SNAKE_CASE_ = index_name SCREAMING_SNAKE_CASE_ = retrieval_vector_size SCREAMING_SNAKE_CASE_ = retrieval_batch_size SCREAMING_SNAKE_CASE_ = passages_path SCREAMING_SNAKE_CASE_ = index_path SCREAMING_SNAKE_CASE_ = use_dummy_dataset SCREAMING_SNAKE_CASE_ = output_retrieved SCREAMING_SNAKE_CASE_ = do_deduplication SCREAMING_SNAKE_CASE_ = use_cache if self.forced_eos_token_id is None: SCREAMING_SNAKE_CASE_ = getattr(self.generator , '''forced_eos_token_id''' , _A ) @classmethod def _UpperCamelCase ( cls , _A , _A , **_A ) -> PretrainedConfig: return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_A ) def _UpperCamelCase ( self ) -> Dict: SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE_ = self.question_encoder.to_dict() SCREAMING_SNAKE_CASE_ = self.generator.to_dict() SCREAMING_SNAKE_CASE_ = self.__class__.model_type return output
code_codestyle: 597

style_context:
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ =KandinskyVaaImgaImgPipeline UpperCAmelCase_ =["image_embeds", "negative_image_embeds", "image"] UpperCAmelCase_ =[ "image_embeds", "negative_image_embeds", "image", ] UpperCAmelCase_ =[ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] UpperCAmelCase_ =False @property def _UpperCamelCase ( self ) -> Optional[Any]: return 32 @property def _UpperCamelCase ( self ) -> Tuple: return 32 @property def _UpperCamelCase ( self ) -> List[Any]: return self.time_input_dim @property def _UpperCamelCase ( self ) -> Tuple: return self.time_input_dim * 4 @property def _UpperCamelCase ( self ) -> List[Any]: return 100 @property def _UpperCamelCase ( self ) -> int: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = { '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(**_A ) return model @property def _UpperCamelCase ( self ) -> List[Any]: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _UpperCamelCase ( self ) -> Optional[int]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = VQModel(**self.dummy_movq_kwargs ) return model def _UpperCamelCase ( self ) -> List[str]: SCREAMING_SNAKE_CASE_ = self.dummy_unet SCREAMING_SNAKE_CASE_ = self.dummy_movq SCREAMING_SNAKE_CASE_ = { '''num_train_timesteps''': 1000, '''beta_schedule''': '''linear''', '''beta_start''': 0.0_0085, '''beta_end''': 0.012, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } SCREAMING_SNAKE_CASE_ = DDIMScheduler(**_A ) SCREAMING_SNAKE_CASE_ = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def _UpperCamelCase ( self , _A , _A=0 ) -> Dict: SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_A ) 
).to(_A ) SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( _A ) # create init_image SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(_A ) ).to(_A ) SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE_ = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((256, 256) ) if str(_A ).startswith('''mps''' ): SCREAMING_SNAKE_CASE_ = torch.manual_seed(_A ) else: SCREAMING_SNAKE_CASE_ = torch.Generator(device=_A ).manual_seed(_A ) SCREAMING_SNAKE_CASE_ = { '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def _UpperCamelCase ( self ) -> List[str]: SCREAMING_SNAKE_CASE_ = '''cpu''' SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = self.pipeline_class(**_A ) SCREAMING_SNAKE_CASE_ = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) SCREAMING_SNAKE_CASE_ = pipe(**self.get_dummy_inputs(_A ) ) SCREAMING_SNAKE_CASE_ = output.images SCREAMING_SNAKE_CASE_ = pipe( **self.get_dummy_inputs(_A ) , return_dict=_A , )[0] SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE_ = np.array( [0.619_9778, 0.6398_4406, 0.4614_5785, 0.6294_4984, 0.562_2215, 0.4730_6132, 0.4744_1456, 0.460_7606, 0.4871_9263] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase ( self ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCamelCase ( self ) -> str: SCREAMING_SNAKE_CASE_ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' ) SCREAMING_SNAKE_CASE_ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) SCREAMING_SNAKE_CASE_ = '''A red cartoon frog, 4k''' SCREAMING_SNAKE_CASE_ = KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(_A ) SCREAMING_SNAKE_CASE_ = KandinskyVaaImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE_ = pipeline.to(_A ) pipeline.set_progress_bar_config(disable=_A ) SCREAMING_SNAKE_CASE_ = torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = pipe_prior( _A , generator=_A , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() SCREAMING_SNAKE_CASE_ = pipeline( image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , ) SCREAMING_SNAKE_CASE_ = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_A , _A )
style_context_codestyle: 597
label: 1

code:
import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _UpperCamelCase : '''simple docstring''' def __init__( self : List[str] , snake_case_ : Tuple , snake_case_ : int=13 , snake_case_ : int=32 , snake_case_ : int=3 , snake_case_ : Tuple=4 , snake_case_ : List[str]=[10, 20, 30, 40] , snake_case_ : int=[2, 2, 3, 2] , snake_case_ : List[Any]=True , snake_case_ : Optional[Any]=True , snake_case_ : List[str]=37 , snake_case_ : Tuple="gelu" , snake_case_ : int=10 , snake_case_ : str=0.02 , snake_case_ : int=["stage2", "stage3", "stage4"] , snake_case_ : List[Any]=[2, 3, 4] , snake_case_ : List[Any]=None , ): UpperCamelCase_: int = parent UpperCamelCase_: str = batch_size UpperCamelCase_: Tuple = image_size UpperCamelCase_: Dict = num_channels UpperCamelCase_: Any = num_stages UpperCamelCase_: Optional[int] = hidden_sizes UpperCamelCase_: str = depths UpperCamelCase_: Any = is_training UpperCamelCase_: Union[str, Any] = use_labels UpperCamelCase_: Dict = intermediate_size UpperCamelCase_: Union[str, Any] = hidden_act UpperCamelCase_: List[Any] = num_labels UpperCamelCase_: Tuple = initializer_range UpperCamelCase_: Optional[Any] = out_features UpperCamelCase_: Optional[int] = out_indices UpperCamelCase_: List[str] = scope def lowerCAmelCase__ ( self : Tuple ): UpperCamelCase_: Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase_: Optional[Any] = None if self.use_labels: UpperCamelCase_: Tuple = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase_: List[str] = self.get_config() return config, pixel_values, labels def lowerCAmelCase__ ( self : Any ): return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Dict , snake_case_ : List[str] , snake_case_ : Tuple ): UpperCamelCase_: Optional[int] = ConvNextVaModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase_: int = model(__lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : Optional[Any] ): UpperCamelCase_: int = ConvNextVaForImageClassification(__lowerCamelCase ) 
model.to(__lowerCamelCase ) model.eval() UpperCamelCase_: Optional[Any] = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase__ ( self : Dict , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Optional[int] ): UpperCamelCase_: List[str] = ConvNextVaBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase_: Dict = model(__lowerCamelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None UpperCamelCase_: str = None UpperCamelCase_: Any = ConvNextVaBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase_: Dict = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowerCAmelCase__ ( self : Tuple ): UpperCamelCase_: List[str] = self.prepare_config_and_inputs() UpperCamelCase_: Dict = config_and_inputs UpperCamelCase_: str = {'''pixel_values''': pixel_values} return config, inputs_dict def lowerCAmelCase__ ( self : List[str] ): UpperCamelCase_: Dict = self.prepare_config_and_inputs() UpperCamelCase_: Any = config_and_inputs UpperCamelCase_: int = {'''pixel_values''': pixel_values, '''labels''': labels} return config, inputs_dict @require_torch class _UpperCamelCase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): '''simple docstring''' __UpperCamelCase : int = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) __UpperCamelCase : str = ( {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification} if is_torch_available() else {} ) __UpperCamelCase : int = False __UpperCamelCase : int = False __UpperCamelCase : Tuple = False __UpperCamelCase : Tuple = False __UpperCamelCase : Dict = False def lowerCAmelCase__ ( self : Any ): UpperCamelCase_: int = ConvNextVaModelTester(self ) UpperCamelCase_: str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def lowerCAmelCase__ ( self : Optional[Any] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase__ ( self : str ): return @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" ) def lowerCAmelCase__ ( self : int ): pass @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" ) def lowerCAmelCase__ ( self : Optional[int] ): pass @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" ) def 
lowerCAmelCase__ ( self : Tuple ): pass def lowerCAmelCase__ ( self : Optional[Any] ): if not self.model_tester.is_training: return for model_class in self.all_model_classes: UpperCamelCase_: List[Any] = self.model_tester.prepare_config_and_inputs_with_labels() UpperCamelCase_: List[str] = True if model_class.__name__ in [ *get_values(__lowerCamelCase ), *get_values(__lowerCamelCase ), ]: continue UpperCamelCase_: List[str] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() UpperCamelCase_: int = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) UpperCamelCase_: Tuple = model(**__lowerCamelCase ).loss loss.backward() def lowerCAmelCase__ ( self : List[Any] ): if not self.model_tester.is_training: return for model_class in self.all_model_classes: UpperCamelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels() UpperCamelCase_: Optional[int] = False UpperCamelCase_: Dict = True if ( model_class.__name__ in [*get_values(__lowerCamelCase ), *get_values(__lowerCamelCase )] or not model_class.supports_gradient_checkpointing ): continue UpperCamelCase_: int = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.gradient_checkpointing_enable() model.train() UpperCamelCase_: List[str] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) UpperCamelCase_: Tuple = model(**__lowerCamelCase ).loss loss.backward() def lowerCAmelCase__ ( self : int ): UpperCamelCase_: Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_: Dict = model_class(__lowerCamelCase ) UpperCamelCase_: Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase_: Union[str, Any] = [*signature.parameters.keys()] UpperCamelCase_: str = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def lowerCAmelCase__ ( self : int ): UpperCamelCase_: Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def lowerCAmelCase__ ( self : Tuple ): def check_hidden_states_output(snake_case_ : Tuple , snake_case_ : Optional[int] , snake_case_ : Dict ): UpperCamelCase_: Any = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): UpperCamelCase_: Any = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) UpperCamelCase_: Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCamelCase_: Tuple = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCamelCase_: Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_: Tuple = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase_: Tuple = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def lowerCAmelCase__ ( self : Optional[Any] ): UpperCamelCase_: Optional[int] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def lowerCAmelCase__ ( self : Dict ): for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase_: Tuple = ConvNextVaModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def A__ ( ) -> int: UpperCamelCase_: Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase__ ( self : str ): return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None @slow def lowerCAmelCase__ ( self : Tuple ): UpperCamelCase_: Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(__lowerCamelCase ) UpperCamelCase_: Optional[Any] = self.default_image_processor UpperCamelCase_: Any = prepare_img() UpperCamelCase_: Tuple = preprocessor(images=__lowerCamelCase , return_tensors="""pt""" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): UpperCamelCase_: Optional[int] = model(**__lowerCamelCase ) # verify the logits UpperCamelCase_: Tuple = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) UpperCamelCase_: Tuple = torch.tensor([0.9996, 0.1966, -0.4386] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
code_codestyle: 548

style_context:
class Node:
    # Binary search tree node.
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # An in-order walk of a BST visits values in sorted order.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build a BST from the list, then read it back in order.
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
style_context_codestyle: 344
label: 0

code:
"""SQL input/output: read a dataset from a SQL query and write one back to a table."""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream

if TYPE_CHECKING:
    import sqlite3
    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con,
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        # `sql` and `con` are consumed by the writer itself, not pandas.
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written
code_codestyle: 709

style_context:
"""Scheduler utilities: config name, scheduler enum, output dataclass, and base mixin."""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union

import torch

from ..utils import BaseOutput

SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path=None,
        subfolder=None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Resolve compatible scheduler classes by name on the top-level package.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
style_context_codestyle: 113
label: 0
"""simple docstring""" from itertools import permutations def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False UpperCAmelCase__ : List[str] = [7, 11, 13, 17] for i, test in enumerate(__UpperCamelCase ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowerCAmelCase ( __UpperCamelCase = 10 ): '''simple docstring''' return sum( int("""""".join(map(__UpperCamelCase , __UpperCamelCase ) ) ) for num in permutations(range(__UpperCamelCase ) ) if is_substring_divisible(__UpperCamelCase ) ) if __name__ == "__main__": print(F"{solution() = }")
code_codestyle: 65

style_context:
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json", } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCAmelCase : List[str] = 'gpt_bigcode' __UpperCAmelCase : Tuple = ['past_key_values'] __UpperCAmelCase : Dict = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , _a=50_257 , _a=1_024 , _a=768 , _a=12 , _a=12 , _a=None , _a="gelu_pytorch_tanh" , _a=0.1 , _a=0.1 , _a=0.1 , _a=1E-5 , _a=0.02 , _a=True , _a=True , _a=50_256 , _a=50_256 , _a=True , _a=True , _a=True , **_a , ): __a = vocab_size __a = n_positions __a = n_embd __a = n_layer __a = n_head __a = n_inner __a = activation_function __a = resid_pdrop __a = embd_pdrop __a = attn_pdrop __a = layer_norm_epsilon __a = initializer_range __a = scale_attn_weights __a = use_cache __a = attention_softmax_in_fpaa __a = scale_attention_softmax_in_fpaa __a = multi_query __a = bos_token_id __a = eos_token_id super().__init__(bos_token_id=_a , eos_token_id=_a , **_a )
style_context_codestyle: 695
label: 0

code:
"""simple docstring""" import contextlib import copy import random from typing import Any, Dict, Iterable, Optional, Union import numpy as np import torch from .utils import deprecate, is_transformers_available if is_transformers_available(): import transformers def snake_case__ ( __lowerCamelCase : Optional[int] ): """simple docstring""" random.seed(lowerCAmelCase__ ) np.random.seed(lowerCAmelCase__ ) torch.manual_seed(lowerCAmelCase__ ) torch.cuda.manual_seed_all(lowerCAmelCase__ ) # ^^ safe to call this function even if cuda is not available class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Any, lowerCamelCase : Iterable[torch.nn.Parameter], lowerCamelCase : float = 0.9_999, lowerCamelCase : float = 0.0, lowerCamelCase : int = 0, lowerCamelCase : bool = False, lowerCamelCase : Union[float, int] = 1.0, lowerCamelCase : Union[float, int] = 2 / 3, lowerCamelCase : Optional[Any] = None, lowerCamelCase : Dict[str, Any] = None, **lowerCamelCase : int, )-> List[Any]: if isinstance(snake_case__, torch.nn.Module ): lowerCamelCase__ : Optional[Any] =( '''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. ''' '''Please pass the parameters of the module instead.''' ) deprecate( '''passing a `torch.nn.Module` to `ExponentialMovingAverage`''', '''1.0.0''', snake_case__, standard_warn=snake_case__, ) lowerCamelCase__ : Union[str, Any] =parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility lowerCamelCase__ : Optional[Any] =True if kwargs.get('''max_value''', snake_case__ ) is not None: lowerCamelCase__ : Optional[Any] ='''The `max_value` argument is deprecated. Please use `decay` instead.''' deprecate('''max_value''', '''1.0.0''', snake_case__, standard_warn=snake_case__ ) lowerCamelCase__ : List[str] =kwargs['''max_value'''] if kwargs.get('''min_value''', snake_case__ ) is not None: lowerCamelCase__ : str ='''The `min_value` argument is deprecated. Please use `min_decay` instead.''' deprecate('''min_value''', '''1.0.0''', snake_case__, standard_warn=snake_case__ ) lowerCamelCase__ : int =kwargs['''min_value'''] lowerCamelCase__ : List[Any] =list(snake_case__ ) lowerCamelCase__ : Dict =[p.clone().detach() for p in parameters] if kwargs.get('''device''', snake_case__ ) is not None: lowerCamelCase__ : Optional[Any] ='''The `device` argument is deprecated. 
Please use `to` instead.''' deprecate('''device''', '''1.0.0''', snake_case__, standard_warn=snake_case__ ) self.to(device=kwargs['''device'''] ) lowerCamelCase__ : List[str] =None lowerCamelCase__ : Tuple =decay lowerCamelCase__ : List[Any] =min_decay lowerCamelCase__ : Optional[int] =update_after_step lowerCamelCase__ : List[str] =use_ema_warmup lowerCamelCase__ : List[Any] =inv_gamma lowerCamelCase__ : List[str] =power lowerCamelCase__ : Optional[Any] =0 lowerCamelCase__ : Dict =None # set in `step()` lowerCamelCase__ : Any =model_cls lowerCamelCase__ : Any =model_config @classmethod def snake_case ( cls : Dict, lowerCamelCase : List[Any], lowerCamelCase : Any )-> Union[str, Any]: lowerCamelCase__ , lowerCamelCase__ : Dict =model_cls.load_config(snake_case__, return_unused_kwargs=snake_case__ ) lowerCamelCase__ : Optional[int] =model_cls.from_pretrained(snake_case__ ) lowerCamelCase__ : Dict =cls(model.parameters(), model_cls=snake_case__, model_config=model.config ) ema_model.load_state_dict(snake_case__ ) return ema_model def snake_case ( self : Optional[Any], lowerCamelCase : List[Any] )-> int: if self.model_cls is None: raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' ) if self.model_config is None: raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' ) lowerCamelCase__ : List[Any] =self.model_cls.from_config(self.model_config ) lowerCamelCase__ : List[Any] =self.state_dict() state_dict.pop('''shadow_params''', snake_case__ ) model.register_to_config(**snake_case__ ) self.copy_to(model.parameters() ) model.save_pretrained(snake_case__ ) def snake_case ( self : List[str], lowerCamelCase : int )-> Optional[int]: lowerCamelCase__ : int =max(0, optimization_step - self.update_after_step - 1 ) if step <= 0: return 0.0 if self.use_ema_warmup: lowerCamelCase__ : str =1 - (1 + step / self.inv_gamma) ** -self.power else: lowerCamelCase__ : List[Any] =(1 + step) / (10 + step) lowerCamelCase__ : Any =min(snake_case__, self.decay ) # make sure decay is not smaller than min_decay lowerCamelCase__ : Optional[Any] =max(snake_case__, self.min_decay ) return cur_decay_value @torch.no_grad() def snake_case ( self : List[str], lowerCamelCase : Iterable[torch.nn.Parameter] )-> Dict: if isinstance(snake_case__, torch.nn.Module ): lowerCamelCase__ : List[str] =( '''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. ''' '''Please pass the parameters of the module instead.''' ) deprecate( '''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''', '''1.0.0''', snake_case__, standard_warn=snake_case__, ) lowerCamelCase__ : Dict =parameters.parameters() lowerCamelCase__ : List[Any] =list(snake_case__ ) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. 
lowerCamelCase__ : Optional[Any] =self.get_decay(self.optimization_step ) lowerCamelCase__ : Any =decay lowerCamelCase__ : Tuple =1 - decay lowerCamelCase__ : Tuple =contextlib.nullcontext if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): import deepspeed for s_param, param in zip(self.shadow_params, snake_case__ ): if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): lowerCamelCase__ : int =deepspeed.zero.GatheredParameters(snake_case__, modifier_rank=snake_case__ ) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param) ) else: s_param.copy_(snake_case__ ) def snake_case ( self : List[Any], lowerCamelCase : Iterable[torch.nn.Parameter] )-> Dict: lowerCamelCase__ : Any =list(snake_case__ ) for s_param, param in zip(self.shadow_params, snake_case__ ): param.data.copy_(s_param.to(param.device ).data ) def snake_case ( self : List[str], lowerCamelCase : int=None, lowerCamelCase : int=None )-> str: lowerCamelCase__ : Dict =[ p.to(device=snake_case__, dtype=snake_case__ ) if p.is_floating_point() else p.to(device=snake_case__ ) for p in self.shadow_params ] def snake_case ( self : Optional[int] )-> Optional[int]: return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def snake_case ( self : Any, lowerCamelCase : Iterable[torch.nn.Parameter] )-> str: lowerCamelCase__ : Optional[Any] =[param.detach().cpu().clone() for param in parameters] def snake_case ( self : str, lowerCamelCase : Iterable[torch.nn.Parameter] )-> List[Any]: if self.temp_stored_params is None: raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' ) for c_param, param in zip(self.temp_stored_params, snake_case__ ): param.data.copy_(c_param.data ) # Better memory-wise. 
lowerCamelCase__ : int =None def snake_case ( self : Union[str, Any], lowerCamelCase : dict )-> Optional[int]: lowerCamelCase__ : Dict =copy.deepcopy(snake_case__ ) lowerCamelCase__ : List[str] =state_dict.get('''decay''', self.decay ) if self.decay < 0.0 or self.decay > 1.0: raise ValueError('''Decay must be between 0 and 1''' ) lowerCamelCase__ : Dict =state_dict.get('''min_decay''', self.min_decay ) if not isinstance(self.min_decay, snake_case__ ): raise ValueError('''Invalid min_decay''' ) lowerCamelCase__ : Optional[Any] =state_dict.get('''optimization_step''', self.optimization_step ) if not isinstance(self.optimization_step, snake_case__ ): raise ValueError('''Invalid optimization_step''' ) lowerCamelCase__ : str =state_dict.get('''update_after_step''', self.update_after_step ) if not isinstance(self.update_after_step, snake_case__ ): raise ValueError('''Invalid update_after_step''' ) lowerCamelCase__ : Optional[int] =state_dict.get('''use_ema_warmup''', self.use_ema_warmup ) if not isinstance(self.use_ema_warmup, snake_case__ ): raise ValueError('''Invalid use_ema_warmup''' ) lowerCamelCase__ : Tuple =state_dict.get('''inv_gamma''', self.inv_gamma ) if not isinstance(self.inv_gamma, (float, int) ): raise ValueError('''Invalid inv_gamma''' ) lowerCamelCase__ : Optional[int] =state_dict.get('''power''', self.power ) if not isinstance(self.power, (float, int) ): raise ValueError('''Invalid power''' ) lowerCamelCase__ : str =state_dict.get('''shadow_params''', snake_case__ ) if shadow_params is not None: lowerCamelCase__ : Dict =shadow_params if not isinstance(self.shadow_params, snake_case__ ): raise ValueError('''shadow_params must be a list''' ) if not all(isinstance(snake_case__, torch.Tensor ) for p in self.shadow_params ): raise ValueError('''shadow_params must all be Tensors''' )
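# A minimal sketch of the EMA update implemented above, in plain PyTorch and
# with no diffusers dependency: the warmup-free decay schedule
# (1 + step) / (10 + step) capped at max_decay, and the in-place update
# s <- s - (1 - decay) * (s - p) from step(). The name `ema_update` is
# illustrative, not part of the library API.
import torch

def ema_update(shadow: list, params: list, step: int, max_decay: float = 0.9999) -> None:
    decay = min((1 + step) / (10 + step), max_decay)
    with torch.no_grad():
        for s, p in zip(shadow, params):
            s.sub_((1 - decay) * (s - p))  # equivalent to s = decay * s + (1 - decay) * p

params = [torch.randn(3, requires_grad=True)]
shadow = [p.clone().detach() for p in params]
for step in range(1, 4):
    params[0].data += 0.1  # stand-in for an optimizer step
    ema_update(shadow, params, step)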
706
"""simple docstring""" import os def snake_case__ ( ): """simple docstring""" with open(os.path.dirname(__lowerCamelCase ) + '''/p022_names.txt''' ) as file: lowerCamelCase__ : Tuple =str(file.readlines()[0] ) lowerCamelCase__ : int =names.replace('''"''' , '''''' ).split(''',''' ) names.sort() lowerCamelCase__ : Union[str, Any] =0 lowerCamelCase__ : str =0 for i, name in enumerate(__lowerCamelCase ): for letter in name: name_score += ord(__lowerCamelCase ) - 64 total_score += (i + 1) * name_score lowerCamelCase__ : Dict =0 return total_score if __name__ == "__main__": print(solution())
625
0
"""simple docstring""" import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL A_ = logging.get_logger(__name__) def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): def constraint_to_multiple_of(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=0 ,lowerCAmelCase__=None ): lowerCamelCase_ = round(val / multiple ) * multiple if max_val is not None and x > max_val: lowerCamelCase_ = math.floor(val / multiple ) * multiple if x < min_val: lowerCamelCase_ = math.ceil(val / multiple ) * multiple return x lowerCamelCase_ = (output_size, output_size) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else output_size lowerCamelCase_ , lowerCamelCase_ = get_image_size(lowerCAmelCase__ ) lowerCamelCase_ , lowerCamelCase_ = output_size # determine new height and width lowerCamelCase_ = output_height / input_height lowerCamelCase_ = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width lowerCamelCase_ = scale_width else: # fit height lowerCamelCase_ = scale_height lowerCamelCase_ = constraint_to_multiple_of(scale_height * input_height ,multiple=lowerCAmelCase__ ) lowerCamelCase_ = constraint_to_multiple_of(scale_width * input_width ,multiple=lowerCAmelCase__ ) return (new_height, new_width) class __lowerCamelCase ( lowerCAmelCase ): a__: int = ['pixel_values'] def __init__( self , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = PILImageResampling.BILINEAR , UpperCAmelCase = False , UpperCAmelCase = 1 , UpperCAmelCase = True , UpperCAmelCase = 1 / 255 , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ): super().__init__(**UpperCAmelCase ) lowerCamelCase_ = size if size is not None else {'''height''': 384, '''width''': 384} lowerCamelCase_ = get_size_dict(UpperCAmelCase ) lowerCamelCase_ = do_resize lowerCamelCase_ = size lowerCamelCase_ = keep_aspect_ratio lowerCamelCase_ = ensure_multiple_of lowerCamelCase_ = resample lowerCamelCase_ = do_rescale lowerCamelCase_ = rescale_factor lowerCamelCase_ = do_normalize lowerCamelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCamelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False , UpperCAmelCase = 1 , UpperCAmelCase = PILImageResampling.BICUBIC , UpperCAmelCase = None , **UpperCAmelCase , ): lowerCamelCase_ = get_size_dict(UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}" ) lowerCamelCase_ = get_resize_output_image_size( UpperCAmelCase , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCAmelCase , multiple=UpperCAmelCase , ) return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ): return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ): return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , **UpperCAmelCase , ): lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize lowerCamelCase_ = size if size is not None else self.size lowerCamelCase_ = get_size_dict(UpperCAmelCase ) lowerCamelCase_ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio lowerCamelCase_ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of lowerCamelCase_ = resample if resample is not None else self.resample lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean lowerCamelCase_ = image_std if image_std is not None else self.image_std lowerCamelCase_ = make_list_of_images(UpperCAmelCase ) if not valid_images(UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
lowerCamelCase_ = [to_numpy_array(UpperCAmelCase ) for image in images] if do_resize: lowerCamelCase_ = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images] if do_rescale: lowerCamelCase_ = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images] if do_normalize: lowerCamelCase_ = [self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images] lowerCamelCase_ = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images] lowerCamelCase_ = {'''pixel_values''': images} return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase ) def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = None ): lowerCamelCase_ = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(UpperCAmelCase ) != len(UpperCAmelCase ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(UpperCAmelCase ): lowerCamelCase_ = target_sizes.numpy() lowerCamelCase_ = [] for idx in range(len(UpperCAmelCase ) ): lowerCamelCase_ = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCAmelCase ) lowerCamelCase_ = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(UpperCAmelCase ) else: lowerCamelCase_ = logits.argmax(dim=1 ) lowerCamelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
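# A standalone sketch of the resize-size computation above: scale an input
# (height, width) toward a square target, optionally keeping the aspect ratio
# by picking the scale closer to 1, then snap each side to a multiple of
# `ensure_multiple_of`. Names are illustrative, and the min/max clamping of
# constraint_to_multiple_of is omitted for brevity.
def constrain(val: float, multiple: int) -> int:
    return int(round(val / multiple) * multiple)

def output_size(height: int, width: int, target: int = 384, multiple: int = 32, keep_aspect: bool = True) -> tuple:
    scale_h, scale_w = target / height, target / width
    if keep_aspect:
        # scale as little as possible
        scale_h = scale_w = scale_w if abs(1 - scale_w) < abs(1 - scale_h) else scale_h
    return constrain(scale_h * height, multiple), constrain(scale_w * width, multiple)

print(output_size(480, 640))  # (384, 512)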
29
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mvp import MvpTokenizer a_ = logging.get_logger(__name__) a_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} # See all MVP models at https://huggingface.co/models?filter=mvp a_ = { 'vocab_file': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json', }, 'added_tokens.json': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json', }, 'merges_file': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt', }, 'tokenizer_file': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json', }, } a_ = { 'RUCAIBox/mvp': 1_024, } class _lowercase ( snake_case_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] lowercase = MvpTokenizer def __init__( self : int , snake_case : str=None , snake_case : int=None , snake_case : Optional[Any]=None , snake_case : Union[str, Any]="replace" , snake_case : Optional[int]="<s>" , snake_case : List[Any]="</s>" , snake_case : Dict="</s>" , snake_case : Tuple="<s>" , snake_case : Any="<unk>" , snake_case : Tuple="<pad>" , snake_case : List[str]="<mask>" , snake_case : int=False , snake_case : Tuple=True , **snake_case : Dict , ) -> Optional[Any]: """simple docstring""" super().__init__( snake_case , snake_case , tokenizer_file=snake_case , errors=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , **snake_case , ) UpperCamelCase_ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , snake_case ) != add_prefix_space: UpperCamelCase_ : Optional[int] = getattr(snake_case , pre_tok_state.pop('type' ) ) UpperCamelCase_ : Optional[Any] = add_prefix_space UpperCamelCase_ : int = pre_tok_class(**snake_case ) UpperCamelCase_ : Any = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` UpperCamelCase_ : Optional[int] = 'post_processor' UpperCamelCase_ : Optional[Any] = getattr(self.backend_tokenizer , snake_case , snake_case ) if tokenizer_component_instance: UpperCamelCase_ : List[Any] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: UpperCamelCase_ : Any = tuple(state['sep'] ) if "cls" in state: UpperCamelCase_ : int = tuple(state['cls'] ) UpperCamelCase_ : Optional[int] = False if state.get('add_prefix_space' , snake_case ) != add_prefix_space: UpperCamelCase_ : Union[str, Any] = add_prefix_space UpperCamelCase_ : Optional[int] = True if state.get('trim_offsets' , snake_case ) != trim_offsets: UpperCamelCase_ : Dict = trim_offsets UpperCamelCase_ : Optional[int] = True if changes_to_apply: UpperCamelCase_ : str = getattr(snake_case , state.pop('type' ) ) UpperCamelCase_ : Union[str, Any] = component_class(**snake_case ) setattr(self.backend_tokenizer , snake_case , snake_case ) @property def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) 
-> str: """simple docstring""" if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case : List[Any] ) -> Tuple: """simple docstring""" UpperCamelCase_ : Dict = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else value UpperCamelCase_ : Tuple = value def SCREAMING_SNAKE_CASE__ ( self : Any , *snake_case : int , **snake_case : Dict ) -> BatchEncoding: """simple docstring""" UpperCamelCase_ : Optional[int] = kwargs.get('is_split_into_words' , snake_case ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*snake_case , **snake_case ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , *snake_case : Optional[int] , **snake_case : int ) -> BatchEncoding: """simple docstring""" UpperCamelCase_ : Optional[int] = kwargs.get('is_split_into_words' , snake_case ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " 'to use it with pretokenized inputs.' ) return super()._encode_plus(*snake_case , **snake_case ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : str , snake_case : Optional[str] = None ) -> Tuple[str]: """simple docstring""" UpperCamelCase_ : str = self._tokenizer.model.save(snake_case , name=snake_case ) return tuple(snake_case ) def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : Union[str, Any] , snake_case : Union[str, Any]=None ) -> Optional[Any]: """simple docstring""" UpperCamelCase_ : Tuple = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case : List[int] , snake_case : Optional[List[int]] = None ) -> List[int]: """simple docstring""" UpperCamelCase_ : List[str] = [self.sep_token_id] UpperCamelCase_ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
417
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
698
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert `value` between any two of the energy units in ENERGY_CONVERSION."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
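# Example conversions using the table above: 1 kWh is 3.6 MJ, and one
# nutritional (large) calorie is 4186.8 J.
print(energy_conversion("kilowatthour", "megajoule", 1))  # 3.6
print(energy_conversion("calorie_nutr", "joule", 1))      # 4186.8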
698
1
"""Project Euler problem 11: greatest product of four adjacent numbers in the 20x20 grid."""
import os


def solution() -> int:
    """Scan right, down and both diagonals of grid.txt for the largest 4-term product."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left)
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp

    return maximum


if __name__ == "__main__":
    print(solution())
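# The same four-direction scan on a tiny 4x4 grid with window length 2, as a
# sanity check of the indexing pattern above; the expected answer is
# 15 * 16 = 240 from the last row.
g = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
best = 0
for i in range(4):
    for j in range(3):
        best = max(best, g[i][j] * g[i][j + 1])  # right
        best = max(best, g[j][i] * g[j + 1][i])  # down
for i in range(3):
    for j in range(3):
        best = max(best, g[i][j] * g[i + 1][j + 1])  # diagonal down-right
        best = max(best, g[i][j + 1] * g[i + 1][j])  # diagonal down-left
print(best)  # 240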
27
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
513
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
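# Hedged usage sketch for the config above: instantiate RobertaConfig with
# its defaults (no weights are downloaded) and read back two fields.
# Assumes the transformers package is installed.
from transformers import RobertaConfig

config = RobertaConfig()
print(config.vocab_size, config.num_hidden_layers)  # 50265 12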
608
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += self.coefficients[i] * polynomial_2.coefficients[j]
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: float) -> float:
        result: float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
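# Quick check of the Polynomial arithmetic above:
# (1 + 2x) * (3 + x) = 2x^2 + 7x + 3, with derivative 4x + 7.
p = Polynomial(1, [1, 2])    # coefficients in ascending order: 1 + 2x
q = Polynomial(1, [3, 1])    # 3 + x
print(p * q)                 # 2x^2 + 7x + 3
print((p * q).derivative())  # 4x + 7
print((p * q).evaluate(1))   # 12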
608
1
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging _SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) class _snake_case ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' __snake_case = ["input_features", "is_longer"] def __init__( self: str , __UpperCamelCase: List[str]=64 , __UpperCamelCase: Optional[int]=4_8000 , __UpperCamelCase: int=480 , __UpperCamelCase: Optional[Any]=10 , __UpperCamelCase: Dict=1024 , __UpperCamelCase: Tuple=0.0 , __UpperCamelCase: Union[str, Any]=False , __UpperCamelCase: float = 0 , __UpperCamelCase: float = 1_4000 , __UpperCamelCase: int = None , __UpperCamelCase: str = "fusion" , __UpperCamelCase: str = "repeatpad" , **__UpperCamelCase: List[Any] , ) -> Tuple: super().__init__( feature_size=_SCREAMING_SNAKE_CASE , sampling_rate=_SCREAMING_SNAKE_CASE , padding_value=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) __magic_name__ : Optional[int] = top_db __magic_name__ : Dict = truncation __magic_name__ : Optional[Any] = padding __magic_name__ : List[str] = fft_window_size __magic_name__ : Dict = (fft_window_size >> 1) + 1 __magic_name__ : Optional[int] = hop_length __magic_name__ : str = max_length_s __magic_name__ : Tuple = max_length_s * sampling_rate __magic_name__ : str = sampling_rate __magic_name__ : Optional[Any] = frequency_min __magic_name__ : str = frequency_max __magic_name__ : List[Any] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_SCREAMING_SNAKE_CASE , min_frequency=_SCREAMING_SNAKE_CASE , max_frequency=_SCREAMING_SNAKE_CASE , sampling_rate=_SCREAMING_SNAKE_CASE , norm=_SCREAMING_SNAKE_CASE , mel_scale="htk" , ) __magic_name__ : Dict = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_SCREAMING_SNAKE_CASE , min_frequency=_SCREAMING_SNAKE_CASE , max_frequency=_SCREAMING_SNAKE_CASE , sampling_rate=_SCREAMING_SNAKE_CASE , norm="slaney" , mel_scale="slaney" , ) def lowerCAmelCase__ ( self: Any ) -> Dict[str, Any]: __magic_name__ : List[str] = copy.deepcopy(self.__dict__ ) __magic_name__ : Optional[int] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def lowerCAmelCase__ ( self: Any , __UpperCamelCase: np.array , __UpperCamelCase: Optional[np.array] = None ) -> np.ndarray: __magic_name__ : Any = spectrogram( _SCREAMING_SNAKE_CASE , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_SCREAMING_SNAKE_CASE , log_mel="dB" , ) return log_mel_spectrogram.T def lowerCAmelCase__ ( self: Optional[Any] , __UpperCamelCase: int , __UpperCamelCase: List[str] , __UpperCamelCase: Dict ) -> List[str]: __magic_name__ : Any = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk __magic_name__ : Any = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk __magic_name__ : Union[str, Any] = [0] # randomly choose index for each part __magic_name__ : Union[str, Any] = np.random.choice(ranges[0] ) __magic_name__ : 
Optional[Any] = np.random.choice(ranges[1] ) __magic_name__ : Tuple = np.random.choice(ranges[2] ) __magic_name__ : int = mel[idx_front : idx_front + chunk_frames, :] __magic_name__ : Dict = mel[idx_middle : idx_middle + chunk_frames, :] __magic_name__ : int = mel[idx_back : idx_back + chunk_frames, :] __magic_name__ : Optional[int] = torch.tensor(mel[None, None, :] ) __magic_name__ : List[str] = torch.nn.functional.interpolate( _SCREAMING_SNAKE_CASE , size=[chunk_frames, 64] , mode="bilinear" , align_corners=_SCREAMING_SNAKE_CASE ) __magic_name__ : int = mel_shrink[0][0].numpy() __magic_name__ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def lowerCAmelCase__ ( self: Union[str, Any] , __UpperCamelCase: np.array , __UpperCamelCase: List[Any] , __UpperCamelCase: str , __UpperCamelCase: Tuple ) -> np.array: if waveform.shape[0] > max_length: if truncation == "rand_trunc": __magic_name__ : Optional[Any] = True # random crop to max_length (for compatibility) -> this should be handled by self.pad __magic_name__ : str = len(_SCREAMING_SNAKE_CASE ) - max_length __magic_name__ : str = np.random.randint(0 , overflow + 1 ) __magic_name__ : Dict = waveform[idx : idx + max_length] __magic_name__ : Any = self._np_extract_fbank_features(_SCREAMING_SNAKE_CASE , self.mel_filters_slaney )[None, :] elif truncation == "fusion": __magic_name__ : Dict = self._np_extract_fbank_features(_SCREAMING_SNAKE_CASE , self.mel_filters ) __magic_name__ : Dict = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed __magic_name__ : Dict = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. __magic_name__ : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 ) __magic_name__ : Union[str, Any] = False else: __magic_name__ : Tuple = self._random_mel_fusion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __magic_name__ : Optional[Any] = True else: raise NotImplementedError(f"""data_truncating {truncation} not implemented""" ) else: __magic_name__ : Optional[Any] = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": __magic_name__ : Union[str, Any] = int(max_length / len(_SCREAMING_SNAKE_CASE ) ) __magic_name__ : Tuple = np.stack(np.tile(_SCREAMING_SNAKE_CASE , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": __magic_name__ : Any = int(max_length / len(_SCREAMING_SNAKE_CASE ) ) __magic_name__ : Tuple = np.stack(np.tile(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) __magic_name__ : Dict = np.pad(_SCREAMING_SNAKE_CASE , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 ) if truncation == "fusion": __magic_name__ : Optional[int] = self._np_extract_fbank_features(_SCREAMING_SNAKE_CASE , self.mel_filters ) __magic_name__ : Dict = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: __magic_name__ : Tuple = self._np_extract_fbank_features(_SCREAMING_SNAKE_CASE , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self: Any , __UpperCamelCase: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __UpperCamelCase: str = None , __UpperCamelCase: Optional[str] = None , __UpperCamelCase: Optional[int] = None , __UpperCamelCase: Optional[int] = None , __UpperCamelCase: Optional[Union[str, TensorType]] = None , **__UpperCamelCase: Any , ) -> BatchFeature: __magic_name__ : List[str] = truncation if truncation is not None else self.truncation __magic_name__ : Tuple = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input""" f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) __magic_name__ : int = isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) __magic_name__ : Optional[Any] = is_batched_numpy or ( isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __magic_name__ : List[str] = [np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ): __magic_name__ : Dict = np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __magic_name__ : Union[str, Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __magic_name__ : str = [np.asarray(_SCREAMING_SNAKE_CASE )] # convert to mel spectrogram, truncate and pad if needed. 
__magic_name__ : Optional[int] = [ self._get_input_mel(_SCREAMING_SNAKE_CASE , max_length if max_length else self.nb_max_samples , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for waveform in raw_speech ] __magic_name__ : Optional[int] = [] __magic_name__ : List[Any] = [] for mel, longer in padded_inputs: input_mel.append(_SCREAMING_SNAKE_CASE ) is_longer.append(_SCREAMING_SNAKE_CASE ) if truncation == "fusion" and sum(_SCREAMING_SNAKE_CASE ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer __magic_name__ : int = np.random.randint(0 , len(_SCREAMING_SNAKE_CASE ) ) __magic_name__ : int = True if isinstance(input_mel[0] , _SCREAMING_SNAKE_CASE ): __magic_name__ : Optional[int] = [np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool __magic_name__ : str = [[longer] for longer in is_longer] __magic_name__ : List[str] = {"input_features": input_mel, "is_longer": is_longer} __magic_name__ : Dict = BatchFeature(_SCREAMING_SNAKE_CASE ) if return_tensors is not None: __magic_name__ : Dict = input_features.convert_to_tensors(_SCREAMING_SNAKE_CASE ) return input_features
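# A minimal numpy sketch of the "repeatpad" branch above: tile a short
# waveform to fill max_length, then zero-pad the remainder. The function
# name is illustrative, not the library API.
import numpy as np

def repeatpad(waveform: np.ndarray, max_length: int) -> np.ndarray:
    n_repeat = int(max_length / len(waveform))
    waveform = np.tile(waveform, n_repeat)
    return np.pad(waveform, (0, max_length - len(waveform)), mode="constant")

print(repeatpad(np.ones(3), 10))  # [1. 1. 1. 1. 1. 1. 1. 1. 1. 0.]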
436
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin __snake_case : List[Any] = get_tests_dir('fixtures/test_sentencepiece.model') __snake_case : Union[str, Any] = {'target_lang': 'fi', 'source_lang': 'en'} __snake_case : Union[str, Any] = '>>zh<<' __snake_case : List[str] = 'Helsinki-NLP/' if is_torch_available(): __snake_case : Optional[int] = 'pt' elif is_tf_available(): __snake_case : List[Any] = 'tf' else: __snake_case : Union[str, Any] = 'jax' @require_sentencepiece class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = MarianTokenizer SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = True def _SCREAMING_SNAKE_CASE ( self: int) -> Tuple: """simple docstring""" super().setUp() __lowerCAmelCase : Dict = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] __lowerCAmelCase : Union[str, Any] = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE)))) __lowerCAmelCase : int = Path(self.tmpdirname) save_json(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES["vocab"]) save_json(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"]) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES["source_spm"]) copyfile(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES["target_spm"]) __lowerCAmelCase : Tuple = MarianTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self: Dict , **_SCREAMING_SNAKE_CASE: List[str]) -> MarianTokenizer: """simple docstring""" return MarianTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE) def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: List[str]) -> Dict: """simple docstring""" return ( "This is a test", "This is a test", ) def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> int: """simple docstring""" __lowerCAmelCase : Union[str, Any] = "</s>" __lowerCAmelCase : int = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE) , _SCREAMING_SNAKE_CASE) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE) , _SCREAMING_SNAKE_CASE) def _SCREAMING_SNAKE_CASE ( self: Tuple) -> List[Any]: """simple docstring""" __lowerCAmelCase : List[str] = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , "</s>") self.assertEqual(vocab_keys[1] , "<unk>") self.assertEqual(vocab_keys[-1] , "<pad>") self.assertEqual(len(_SCREAMING_SNAKE_CASE) , 9) def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Dict: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 9) def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Any: """simple docstring""" __lowerCAmelCase : Optional[Any] = MarianTokenizer.from_pretrained(F"""{ORG_NAME}opus-mt-en-de""") __lowerCAmelCase : int = en_de_tokenizer(["I am a small frog"] , return_tensors=_SCREAMING_SNAKE_CASE) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) __lowerCAmelCase : Optional[int] = [38, 121, 14, 697, 3_8848, 0] 
self.assertListEqual(_SCREAMING_SNAKE_CASE , batch.input_ids[0]) __lowerCAmelCase : Tuple = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE) __lowerCAmelCase : List[Any] = [x.name for x in Path(_SCREAMING_SNAKE_CASE).glob("*")] self.assertIn("source.spm" , _SCREAMING_SNAKE_CASE) MarianTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE) def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> List[Any]: """simple docstring""" __lowerCAmelCase : List[str] = self.get_tokenizer() __lowerCAmelCase : int = tok( ["I am a small frog" * 1000, "I am a small frog"] , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) self.assertEqual(batch.input_ids.shape , (2, 512)) def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Optional[Any]: """simple docstring""" __lowerCAmelCase : Any = self.get_tokenizer() __lowerCAmelCase : str = tok(["I am a tiny frog", "I am a small frog"] , padding=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) self.assertEqual(batch_smaller.input_ids.shape , (2, 10)) @slow def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Optional[Any]: """simple docstring""" __lowerCAmelCase : Optional[int] = {"input_ids": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_SCREAMING_SNAKE_CASE , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , ) def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Any: """simple docstring""" __lowerCAmelCase : List[Any] = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs") __lowerCAmelCase : List[str] = "Tämä on testi" __lowerCAmelCase : int = "This is a test" __lowerCAmelCase : Union[str, Any] = [76, 7, 2047, 2] __lowerCAmelCase : Dict = [69, 12, 11, 940, 2] __lowerCAmelCase : List[str] = tokenizer(_SCREAMING_SNAKE_CASE).input_ids self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) __lowerCAmelCase : str = tokenizer(text_target=_SCREAMING_SNAKE_CASE).input_ids self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) __lowerCAmelCase : Dict = tokenizer.decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE) self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
293
0
import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotSmallConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html snake_case__ : Union[str, Any] = """platform""" import jax import jax.numpy as jnp from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, shift_tokens_right, ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ): if attention_mask is None: __lowercase = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: __lowercase = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: __lowercase = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __lowercase = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __lowercase = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class _A : '''simple docstring''' def __init__( self : Dict , lowerCamelCase : Any , lowerCamelCase : List[Any]=13 , lowerCamelCase : Tuple=7 , lowerCamelCase : List[str]=True , lowerCamelCase : Optional[Any]=False , lowerCamelCase : Optional[int]=99 , lowerCamelCase : Any=16 , lowerCamelCase : Optional[Any]=2 , lowerCamelCase : Optional[int]=4 , lowerCamelCase : Any=4 , lowerCamelCase : Optional[Any]="gelu" , lowerCamelCase : Tuple=0.1 , lowerCamelCase : Dict=0.1 , lowerCamelCase : Optional[int]=32 , lowerCamelCase : Tuple=2 , lowerCamelCase : List[Any]=1 , lowerCamelCase : Tuple=0 , lowerCamelCase : Optional[Any]=0.02 , ): '''simple docstring''' __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_labels __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = eos_token_id __lowercase = pad_token_id __lowercase = bos_token_id __lowercase = initializer_range def _snake_case ( self : List[Any] ): '''simple docstring''' __lowercase = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) __lowercase = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) __lowercase = shift_tokens_right(lowerCamelCase , 1 , 2 ) __lowercase = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , 
decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCamelCase , ) __lowercase = prepare_blenderbot_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return config, inputs_dict def _snake_case ( self : Tuple ): '''simple docstring''' __lowercase , __lowercase = self.prepare_config_and_inputs() return config, inputs_dict def _snake_case ( self : int , lowerCamelCase : str , lowerCamelCase : Optional[int] , lowerCamelCase : Dict ): '''simple docstring''' __lowercase = 20 __lowercase = model_class_name(lowerCamelCase ) __lowercase = model.encode(inputs_dict["input_ids"] ) __lowercase , __lowercase = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) __lowercase = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase , lowerCamelCase ) __lowercase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" ) __lowercase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __lowercase = model.decode( decoder_input_ids[:, :-1] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , decoder_position_ids=lowerCamelCase , ) __lowercase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) __lowercase = model.decode( decoder_input_ids[:, -1:] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCamelCase , ) __lowercase = model.decode(lowerCamelCase , lowerCamelCase ) __lowercase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def _snake_case ( self : Any , lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : str ): '''simple docstring''' __lowercase = 20 __lowercase = model_class_name(lowerCamelCase ) __lowercase = model.encode(inputs_dict["input_ids"] ) __lowercase , __lowercase = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) __lowercase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __lowercase = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase , lowerCamelCase ) __lowercase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __lowercase = model.decode( decoder_input_ids[:, :-1] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , decoder_position_ids=lowerCamelCase , ) __lowercase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) __lowercase = model.decode( decoder_input_ids[:, -1:] , lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCamelCase , decoder_position_ids=lowerCamelCase , ) __lowercase = model.decode(lowerCamelCase , lowerCamelCase , decoder_attention_mask=lowerCamelCase ) __lowercase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] 
- outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) @require_flax class _A ( unittest.TestCase ): '''simple docstring''' _snake_case : Optional[Any] = 99 def _snake_case ( self : List[str] ): '''simple docstring''' __lowercase = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) __lowercase = input_ids.shape[0] __lowercase = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase , __lowercase , __lowercase = self._get_config_and_data() __lowercase = FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase ) __lowercase = lm_model(input_ids=lowerCamelCase ) __lowercase = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape , lowerCamelCase ) def _snake_case ( self : Tuple ): '''simple docstring''' __lowercase = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) __lowercase = FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase ) __lowercase = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) __lowercase = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) __lowercase = lm_model(input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase ) __lowercase = (*summary.shape, config.vocab_size) self.assertEqual(outputs["logits"].shape , lowerCamelCase ) def _snake_case ( self : Union[str, Any] ): '''simple docstring''' __lowercase = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) __lowercase = shift_tokens_right(lowerCamelCase , 1 , 2 ) __lowercase = np.equal(lowerCamelCase , 1 ).astype(np.floataa ).sum() __lowercase = np.equal(lowerCamelCase , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(lowerCamelCase , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class _A ( _lowercase , unittest.TestCase , _lowercase ): '''simple docstring''' _snake_case : Union[str, Any] = True _snake_case : Tuple = ( ( FlaxBlenderbotSmallModel, FlaxBlenderbotSmallForConditionalGeneration, ) if is_flax_available() else () ) _snake_case : Tuple = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else () def _snake_case ( self : Union[str, Any] ): '''simple docstring''' __lowercase = FlaxBlenderbotSmallModelTester(self ) def _snake_case ( self : str ): '''simple docstring''' __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def _snake_case ( self : Tuple ): '''simple docstring''' __lowercase , __lowercase = 
self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def _snake_case ( self : List[Any] ): '''simple docstring''' __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowercase = self._prepare_for_class(lowerCamelCase , lowerCamelCase ) __lowercase = model_class(lowerCamelCase ) @jax.jit def encode_jitted(lowerCamelCase : Optional[int] , lowerCamelCase : List[Any]=None , **lowerCamelCase : Optional[Any] ): return model.encode(input_ids=lowerCamelCase , attention_mask=lowerCamelCase ) with self.subTest("JIT Enabled" ): __lowercase = encode_jitted(**lowerCamelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __lowercase = encode_jitted(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) ) for jitted_output, output in zip(lowerCamelCase , lowerCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowercase = model_class(lowerCamelCase ) __lowercase = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] ) __lowercase = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(lowerCamelCase : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple ): return model.decode( decoder_input_ids=lowerCamelCase , decoder_attention_mask=lowerCamelCase , encoder_outputs=lowerCamelCase , ) with self.subTest("JIT Enabled" ): __lowercase = decode_jitted(**lowerCamelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __lowercase = decode_jitted(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) ) for jitted_output, output in zip(lowerCamelCase , lowerCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _snake_case ( self : Any ): '''simple docstring''' for model_class_name in self.all_model_classes: __lowercase = model_class_name.from_pretrained("facebook/blenderbot_small-90M" ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids __lowercase = np.ones((1, 1) ) * model.config.eos_token_id __lowercase = model(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase )
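# A simplified standalone version of the shift_tokens_right helper the tests
# above exercise (this mirrors the common Flax seq2seq pattern; the exact
# library implementation may differ in details such as -100 handling).
import numpy as np

def shift_tokens_right(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]  # shift everything one position right
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)

print(shift_tokens_right(np.array([[5, 6, 2]]), 1, 0))  # [[0 5 6]]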
655
from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class _A : '''simple docstring''' _snake_case : int _snake_case : TreeNode | None = None _snake_case : TreeNode | None = None snake_case__ : Dict = namedtuple("""CoinsDistribResult""", """moves excess""") def snake_case_ ( _SCREAMING_SNAKE_CASE ): if root is None: return 0 # Validation def count_nodes(_SCREAMING_SNAKE_CASE ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(_SCREAMING_SNAKE_CASE ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ): raise ValueError("The nodes number should be same as the number of coins" ) # Main calculation def get_distrib(_SCREAMING_SNAKE_CASE ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) __lowercase , __lowercase = get_distrib(node.left ) __lowercase , __lowercase = get_distrib(node.right ) __lowercase = 1 - left_distrib_excess __lowercase = 1 - right_distrib_excess __lowercase = ( left_distrib_moves + right_distrib_moves + abs(_SCREAMING_SNAKE_CASE ) + abs(_SCREAMING_SNAKE_CASE ) ) __lowercase = node.data - coins_to_left - coins_to_right return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return get_distrib(_SCREAMING_SNAKE_CASE )[0] if __name__ == "__main__": import doctest doctest.testmod()
655
1
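A quick aside on the row above: its `shift_tokens_right` test checks a standard seq2seq preparation step, shifting the labels one position to the right, prepending the decoder start token, and turning -100 placeholders back into padding. Below is a minimal numpy sketch of that behavior; the name, signature, and -100 handling follow the usual convention and are assumptions, not taken from the row itself.

import numpy as np

def shift_tokens_right(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    # Shift every token one position to the right, dropping the last column,
    # and place the decoder start token in column 0.
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # -100 is the conventional "ignore this label" id; map it back to padding.
    return np.where(shifted == -100, pad_token_id, shifted)

Because the last column is dropped, a row ending in pad tokens loses exactly one pad after the shift, which is what the test's "pads after == pads before - 1" assertion verifies.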
from __future__ import annotations import math def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> bool: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_snake_case ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True UpperCAmelCase_ = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)] def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> list[int]: if not isinstance(_snake_case , _snake_case ): raise ValueError('''n must be an integer''' ) if n <= 0: raise ValueError('''n must be >= 0''' ) _A = [] for num in range(len(_snake_case ) ): _A = 0 while 2 * i * i <= odd_composites[num]: _A = odd_composites[num] - 2 * i * i if is_prime(_snake_case ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(_snake_case ) == n: return list_nums return [] def SCREAMING_SNAKE_CASE_ ( ) -> int: return compute_nums(1 )[0] if __name__ == "__main__": print(f'{solution() = }')
2
def snake_case__ ( density , bulk_modulus ): if density <= 0: raise ValueError("Impossible fluid density" ) if bulk_modulus <= 0: raise ValueError("Impossible bulk modulus" ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
613
0
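The pair of helpers in the row above comes from Project Euler 46 ("Goldbach's other conjecture"): find the smallest odd composite that cannot be written as a prime plus twice a square. A compact, self-contained sketch of that search, with descriptive names of my own choosing:

import math

def is_prime(number: int) -> bool:
    # 6k +/- 1 trial division, the same primality test as the row above.
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

def first_counterexample() -> int:
    # Smallest odd composite n with no decomposition n = prime + 2 * k**2.
    n = 9
    while True:
        if not is_prime(n) and not any(
            is_prime(n - 2 * k * k) for k in range(1, math.isqrt(n // 2) + 1)
        ):
            return n
        n += 2

print(first_counterexample())  # 5777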
'''simple docstring''' from math import pi, sqrt, tan def __snake_case ( UpperCAmelCase_ : float ): if side_length < 0: raise ValueError("surface_area_cube() only accepts non-negative values" ) return 6 * side_length**2 def __snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float , UpperCAmelCase_ : float ): if length < 0 or breadth < 0 or height < 0: raise ValueError("surface_area_cuboid() only accepts non-negative values" ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def __snake_case ( UpperCAmelCase_ : float ): if radius < 0: raise ValueError("surface_area_sphere() only accepts non-negative values" ) return 4 * pi * radius**2 def __snake_case ( UpperCAmelCase_ : float ): if radius < 0: raise ValueError("surface_area_hemisphere() only accepts non-negative values" ) return 3 * pi * radius**2 def __snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float ): if radius < 0 or height < 0: raise ValueError("surface_area_cone() only accepts non-negative values" ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def __snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float , UpperCAmelCase_ : float ): if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( "surface_area_conical_frustum() only accepts non-negative values" ) lowerCamelCase_ = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def __snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float ): if radius < 0 or height < 0: raise ValueError("surface_area_cylinder() only accepts non-negative values" ) return 2 * pi * radius * (height + radius) def __snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float ): if torus_radius < 0 or tube_radius < 0: raise ValueError("surface_area_torus() only accepts non-negative values" ) if torus_radius < tube_radius: raise ValueError( "surface_area_torus() does not support spindle or self intersecting tori" ) return 4 * pow(UpperCAmelCase_ , 2 ) * torus_radius * tube_radius def __snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float ): if length < 0 or width < 0: raise ValueError("area_rectangle() only accepts non-negative values" ) return length * width def __snake_case ( UpperCAmelCase_ : float ): if side_length < 0: raise ValueError("area_square() only accepts non-negative values" ) return side_length**2 def __snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float ): if base < 0 or height < 0: raise ValueError("area_triangle() only accepts non-negative values" ) return (base * height) / 2 def __snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float , UpperCAmelCase_ : float ): if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError("area_triangle_three_sides() only accepts non-negative values" ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError("Given three sides do not form a triangle" ) lowerCamelCase_ = (sidea + sidea + sidea) / 2 lowerCamelCase_ = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def __snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float ): if base < 0 or height < 0: raise ValueError("area_parallelogram() only accepts non-negative values" ) return base * height def __snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float , UpperCAmelCase_ : float ): if basea < 0 or basea < 0 or height < 0: raise ValueError("area_trapezium() only accepts non-negative values" ) return 
1 / 2 * (basea + basea) * height def __snake_case ( UpperCAmelCase_ : float ): if radius < 0: raise ValueError("area_circle() only accepts non-negative values" ) return pi * radius**2 def __snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float ): if radius_x < 0 or radius_y < 0: raise ValueError("area_ellipse() only accepts non-negative values" ) return pi * radius_x * radius_y def __snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float ): if diagonal_a < 0 or diagonal_a < 0: raise ValueError("area_rhombus() only accepts non-negative values" ) return 1 / 2 * diagonal_a * diagonal_a def __snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : float ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or sides < 3: raise ValueError( "area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides" ) elif length < 0: raise ValueError( "area_reg_polygon() only accepts non-negative values as \ length of a side" ) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print("""[DEMO] Areas of various geometric shapes: \n""") print(f'''Rectangle: {area_rectangle(10, 20) = }''') print(f'''Square: {area_square(10) = }''') print(f'''Triangle: {area_triangle(10, 10) = }''') print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''') print(f'''Parallelogram: {area_parallelogram(10, 20) = }''') print(f'''Rhombus: {area_rhombus(10, 20) = }''') print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''') print(f'''Circle: {area_circle(20) = }''') print(f'''Ellipse: {area_ellipse(10, 20) = }''') print("""\nSurface Areas of various geometric shapes: \n""") print(f'''Cube: {surface_area_cube(20) = }''') print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''') print(f'''Sphere: {surface_area_sphere(20) = }''') print(f'''Hemisphere: {surface_area_hemisphere(20) = }''') print(f'''Cone: {surface_area_cone(10, 20) = }''') print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''') print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''') print(f'''Torus: {surface_area_torus(20, 10) = }''') print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''') print(f'''Square: {area_reg_polygon(4, 10) = }''') print(f'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
445
'''simple docstring''' import math def __snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float ): return math.pow(UpperCAmelCase_ , 2 ) - a def __snake_case ( UpperCAmelCase_ : float ): return 2 * x def __snake_case ( UpperCAmelCase_ : float ): lowerCamelCase_ = 2.0 while start <= a: lowerCamelCase_ = math.pow(UpperCAmelCase_ , 2 ) return start def __snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : int = 9999 , UpperCAmelCase_ : float = 0.00_0000_0000_0001 ): if a < 0: raise ValueError("math domain error" ) lowerCamelCase_ = get_initial_point(UpperCAmelCase_ ) for _ in range(UpperCAmelCase_ ): lowerCamelCase_ = value lowerCamelCase_ = value - fx(UpperCAmelCase_ , UpperCAmelCase_ ) / fx_derivative(UpperCAmelCase_ ) if abs(prev_value - value ) < tolerance: return value return value if __name__ == "__main__": from doctest import testmod testmod()
445
1
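The second snippet in the row above is Newton's method specialized to square roots: iterate x <- x - (x**2 - a) / (2x) until successive values agree. A self-contained restatement with readable names (mine, not the row's):

def newton_sqrt(a: float, max_iterations: int = 9999, tolerance: float = 1e-14) -> float:
    # Newton's method on f(x) = x**2 - a, whose derivative is 2x.
    if a < 0:
        raise ValueError("math domain error")
    x = max(a, 2.0)  # any starting point >= sqrt(a) converges monotonically
    for _ in range(max_iterations):
        prev = x
        x = x - (x * x - a) / (2 * x)
        if abs(prev - x) < tolerance:
            break
    return x

print(newton_sqrt(2))  # 1.4142135623730951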
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) SCREAMING_SNAKE_CASE : List[Any] = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Optional[int] = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
89
def UpperCAmelCase_ ( _A ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = int(_A ) if n_element < 1: SCREAMING_SNAKE_CASE__ = ValueError('''a should be a positive number''' ) raise my_error SCREAMING_SNAKE_CASE__ = [1] SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = (0, 0, 0) SCREAMING_SNAKE_CASE__ = 1 while index < n_element: while hamming_list[i] * 2 <= hamming_list[-1]: i += 1 while hamming_list[j] * 3 <= hamming_list[-1]: j += 1 while hamming_list[k] * 5 <= hamming_list[-1]: k += 1 hamming_list.append( min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) ) index += 1 return hamming_list if __name__ == "__main__": _SCREAMING_SNAKE_CASE : Tuple = input('''Enter the last number (nth term) of the Hamming Number Series: ''') print('''Formula of Hamming Number Series => 2^i * 3^j * 5^k''') _SCREAMING_SNAKE_CASE : int = hamming(int(n)) print('''-----------------------------------------------------''') print(F"The list with nth numbers is: {hamming_numbers}") print('''-----------------------------------------------------''')
493
0
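The Hamming-numbers row above is hard to follow through the renamed variables, so here is the same idea stated plainly. This sketch uses the classic three-pointer merge, advancing every pointer that produced the minimum so duplicates are skipped, rather than the row's while-loop pointer advance; both generate the numbers of the form 2^i * 3^j * 5^k in increasing order:

def hamming(n: int) -> list:
    if n < 1:
        raise ValueError("n should be a positive number")
    nums = [1]
    i = j = k = 0  # indices of the next candidates to multiply by 2, 3, 5
    while len(nums) < n:
        nxt = min(nums[i] * 2, nums[j] * 3, nums[k] * 5)
        nums.append(nxt)
        if nxt == nums[i] * 2:
            i += 1
        if nxt == nums[j] * 3:
            j += 1
        if nxt == nums[k] * 5:
            k += 1
    return nums

print(hamming(10))  # [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]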
import inspect import unittest from transformers import RegNetConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import RegNetForImageClassification, RegNetModel from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase__: """simple docstring""" def __init__( self : List[Any] , snake_case__ : Optional[int] , snake_case__ : Tuple=3 , snake_case__ : int=32 , snake_case__ : Optional[Any]=3 , snake_case__ : List[Any]=10 , snake_case__ : Optional[Any]=[10, 20, 30, 40] , snake_case__ : Dict=[1, 1, 2, 1] , snake_case__ : Tuple=True , snake_case__ : int=True , snake_case__ : str="relu" , snake_case__ : List[Any]=3 , snake_case__ : Dict=None , ): """simple docstring""" A =parent A =batch_size A =image_size A =num_channels A =embeddings_size A =hidden_sizes A =depths A =is_training A =use_labels A =hidden_act A =num_labels A =scope A =len(snake_case__ ) def _a ( self : Tuple ): """simple docstring""" A =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A =None if self.use_labels: A =ids_tensor([self.batch_size] , self.num_labels ) A =self.get_config() return config, pixel_values, labels def _a ( self : List[str] ): """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _a ( self : str , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] ): """simple docstring""" A =RegNetModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() A =model(snake_case__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : Optional[Any] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] ): """simple docstring""" A =self.num_labels A =RegNetForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() A =model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Dict ): """simple docstring""" A =self.prepare_config_and_inputs() A , A , A =config_and_inputs A ={"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" _A = (RegNetModel, RegNetForImageClassification) if is_torch_available() else () _A = ( {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification} if is_torch_available() else {} ) _A = False _A = False _A = False _A = False def _a ( self : List[str] ): """simple docstring""" A =RegNetModelTester(self ) A =ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ ) def _a ( self : List[Any] ): """simple docstring""" 
self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self : int ): """simple docstring""" return @unittest.skip(reason="RegNet does not use inputs_embeds" ) def _a ( self : str ): """simple docstring""" pass @unittest.skip(reason="RegNet does not support input and output embeddings" ) def _a ( self : List[str] ): """simple docstring""" pass def _a ( self : str ): """simple docstring""" A , A =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A =model_class(snake_case__ ) A =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A =[*signature.parameters.keys()] A =["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def _a ( self : Optional[int] ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def _a ( self : List[Any] ): """simple docstring""" A , A =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A =model_class(config=snake_case__ ) for name, module in model.named_modules(): if isinstance(snake_case__ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def _a ( self : Any ): """simple docstring""" def check_hidden_states_output(snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : List[str] ): A =model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): A =model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) A =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A =self.model_tester.num_stages self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) A , A =self.model_tester.prepare_config_and_inputs_for_common() A =["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: A =layer_type A =True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A =True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) def _a ( self : Union[str, Any] ): """simple docstring""" A =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) @slow def _a ( self : List[Any] ): """simple docstring""" for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A =RegNetModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def UpperCamelCase_ ( ) ->Tuple: A =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class UpperCamelCase__( 
unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : Any ): """simple docstring""" return ( AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : Optional[int] ): """simple docstring""" A =RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case__ ) A =self.default_image_processor A =prepare_img() A =image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): A =model(**snake_case__ ) # verify the logits A =torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , snake_case__ ) A =torch.tensor([-0.4_180, -1.5_051, -3.4_836] ).to(snake_case__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
710
from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class UpperCamelCase__: """simple docstring""" _A = 42 _A = None _A = None __a = namedtuple("""CoinsDistribResult""", """moves excess""") def UpperCamelCase_ ( a_ ) ->int: if root is None: return 0 # Validation def count_nodes(a_ ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(a_ ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(a_ ) != count_coins(a_ ): raise ValueError("The nodes number should be same as the number of coins" ) # Main calculation def get_distrib(a_ ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) A , A =get_distrib(node.left ) A , A =get_distrib(node.right ) A =1 - left_distrib_excess A =1 - right_distrib_excess A =( left_distrib_moves + right_distrib_moves + abs(a_ ) + abs(a_ ) ) A =node.data - coins_to_left - coins_to_right return CoinsDistribResult(a_ , a_ ) return get_distrib(a_ )[0] if __name__ == "__main__": import doctest doctest.testmod()
689
0
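The `get_distrib` helper in the row above is the classic "Distribute Coins in Binary Tree" problem: every node must end up with exactly one coin, and each move shifts one coin across one edge. The post-order insight is that a subtree's surplus or deficit of coins (coins minus nodes) must cross its root edge, and each crossing coin costs one move. A readable sketch with my own names:

from __future__ import annotations
from dataclasses import dataclass

@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None

def distribute_coins(root: TreeNode | None) -> int:
    moves = 0

    def excess(node: TreeNode | None) -> int:
        # Returns (coins in subtree) - (nodes in subtree); every unit of
        # imbalance below this node must travel through its parent edge.
        nonlocal moves
        if node is None:
            return 0
        left, right = excess(node.left), excess(node.right)
        moves += abs(left) + abs(right)
        return node.data + left + right - 1

    excess(root)
    return moves

# Root holds 3 coins, both children hold 0: one move to each child.
print(distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))))  # 2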
import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin a_ : List[str] = '▁' a_ : str = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class _snake_case ( A__ , unittest.TestCase ): _lowercase : str = BigBirdTokenizer _lowercase : Any = BigBirdTokenizerFast _lowercase : Tuple = True _lowercase : int = True def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]: super().setUp() SCREAMING_SNAKE_CASE = self.tokenizer_class(a , keep_accents=a) tokenizer.save_pretrained(self.tmpdirname) def SCREAMING_SNAKE_CASE__ ( self) -> Dict: SCREAMING_SNAKE_CASE = '<s>' SCREAMING_SNAKE_CASE = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a) , a) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a) , a) def SCREAMING_SNAKE_CASE__ ( self) -> List[str]: SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<unk>') self.assertEqual(vocab_keys[1] , '<s>') self.assertEqual(vocab_keys[-1] , '[MASK]') self.assertEqual(len(a) , 1004) def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 1000) def SCREAMING_SNAKE_CASE__ ( self) -> str: if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE = 'I was born in 92000, and this is falsé.' SCREAMING_SNAKE_CASE = tokenizer.tokenize(a) SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(a) self.assertListEqual(a , a) SCREAMING_SNAKE_CASE = tokenizer.encode(a , add_special_tokens=a) SCREAMING_SNAKE_CASE = rust_tokenizer.encode(a , add_special_tokens=a) self.assertListEqual(a , a) SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE = tokenizer.encode(a) SCREAMING_SNAKE_CASE = rust_tokenizer.encode(a) self.assertListEqual(a , a) def SCREAMING_SNAKE_CASE__ ( self) -> str: SCREAMING_SNAKE_CASE = BigBirdTokenizer(a , keep_accents=a) SCREAMING_SNAKE_CASE = tokenizer.tokenize('This is a test') self.assertListEqual(a , ['▁This', '▁is', '▁a', '▁t', 'est']) self.assertListEqual( tokenizer.convert_tokens_to_ids(a) , [285, 46, 10, 170, 382] , ) SCREAMING_SNAKE_CASE = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( a , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(a) self.assertListEqual( a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(a) self.assertListEqual( a , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]: return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') @slow def SCREAMING_SNAKE_CASE__ ( self) -> 
Union[str, Any]: SCREAMING_SNAKE_CASE = 'Hello World!' SCREAMING_SNAKE_CASE = [65, 1_8536, 2260, 101, 66] self.assertListEqual(a , self.big_tokenizer.encode(a)) @slow def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: SCREAMING_SNAKE_CASE = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) # fmt: off SCREAMING_SNAKE_CASE = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231 # fmt: on self.assertListEqual(a , self.big_tokenizer.encode(a)) @require_torch @slow def SCREAMING_SNAKE_CASE__ ( self) -> Any: import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence SCREAMING_SNAKE_CASE = list(self.big_tokenizer.get_vocab().keys())[:10] SCREAMING_SNAKE_CASE = ' '.join(a) SCREAMING_SNAKE_CASE = self.big_tokenizer.encode_plus(a , return_tensors='pt' , return_token_type_ids=a) SCREAMING_SNAKE_CASE = self.big_tokenizer.batch_encode_plus( [sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=a) SCREAMING_SNAKE_CASE = BigBirdConfig(attention_type='original_full') SCREAMING_SNAKE_CASE = BigBirdModel(a) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**a) model(**a) @slow def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]: SCREAMING_SNAKE_CASE = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') SCREAMING_SNAKE_CASE = tokenizer.decode(tokenizer('Paris is the [MASK].').input_ids) self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]') @slow def SCREAMING_SNAKE_CASE__ ( self) -> Dict: # fmt: off SCREAMING_SNAKE_CASE = {'input_ids': [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a , model_name='google/bigbird-roberta-base' , revision='215c99f1600e06f83acce68422f2035b2b5c3510' , )
73
lowerCAmelCase : List[str] = { """A""": ["""B""", """C""", """E"""], """B""": ["""A""", """D""", """E"""], """C""": ["""A""", """F""", """G"""], """D""": ["""B"""], """E""": ["""A""", """B""", """D"""], """F""": ["""C"""], """G""": ["""C"""], } def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Any = set() # keep track of all the paths to be checked SCREAMING_SNAKE_CASE_: Tuple = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue SCREAMING_SNAKE_CASE_: List[Any] = queue.pop(0 ) # get the last node from the path SCREAMING_SNAKE_CASE_: Tuple = path[-1] if node not in explored: SCREAMING_SNAKE_CASE_: Union[str, Any] = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: SCREAMING_SNAKE_CASE_: int = list(_UpperCAmelCase ) new_path.append(_UpperCAmelCase ) queue.append(_UpperCAmelCase ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(_UpperCAmelCase ) # in case there's no path between the 2 nodes return [] def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 SCREAMING_SNAKE_CASE_: List[Any] = [start] SCREAMING_SNAKE_CASE_: List[str] = set(_UpperCAmelCase ) # Keep tab on distances from `start` node. SCREAMING_SNAKE_CASE_: Union[str, Any] = {start: 0, target: -1} while queue: SCREAMING_SNAKE_CASE_: Dict = queue.pop(0 ) if node == target: SCREAMING_SNAKE_CASE_: Tuple = ( dist[node] if dist[target] == -1 else min(dist[target] , dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(_UpperCAmelCase ) queue.append(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Union[str, Any] = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, """G""", """D""")) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, """G""", """D""")) # returns 4
671
0
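The graph row above implements BFS shortest path twice, once returning the path and once the distance. The same search reads more directly with a deque and an early goal check; on the demo graph this sketch reproduces the row's expected ['G', 'C', 'A', 'B', 'D'] (names below are mine):

from collections import deque

def bfs_shortest_path(graph: dict, start: str, goal: str) -> list:
    # BFS explores level by level, so the first path that reaches `goal`
    # uses the fewest edges.
    if start == goal:
        return [start]
    visited = {start}
    queue = deque([[start]])
    while queue:
        path = queue.popleft()
        for neighbour in graph[path[-1]]:
            if neighbour == goal:
                return path + [neighbour]
            if neighbour not in visited:
                visited.add(neighbour)
                queue.append(path + [neighbour])
    return []  # no path between the two nodes

demo_graph = {
    "A": ["B", "C", "E"], "B": ["A", "D", "E"], "C": ["A", "F", "G"],
    "D": ["B"], "E": ["A", "B", "D"], "F": ["C"], "G": ["C"],
}
print(bfs_shortest_path(demo_graph, "G", "D"))  # ['G', 'C', 'A', 'B', 'D']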
'''simple docstring''' import os def lowerCAmelCase( a__ : str = "input.txt" ): '''simple docstring''' with open(os.path.join(os.path.dirname(a__ ) , a__ ) ) as input_file: lowerCamelCase__ = [ [int(a__ ) for element in line.split("," )] for line in input_file.readlines() ] lowerCamelCase__ = len(a__ ) lowerCamelCase__ = len(matrix[0] ) lowerCamelCase__ = [[-1 for _ in range(a__ )] for _ in range(a__ )] for i in range(a__ ): lowerCamelCase__ = matrix[i][0] for j in range(1 , a__ ): for i in range(a__ ): lowerCamelCase__ = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 , a__ ): lowerCamelCase__ = min( minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 , -1 , -1 ): lowerCamelCase__ = min( minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(f'{solution() = }')
426
'''simple docstring''' import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case_ : """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=32 , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=[10, 20, 30, 40] , UpperCamelCase=[2, 2, 3, 2] , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=10 , UpperCamelCase=0.0_2 , UpperCamelCase=["stage2", "stage3", "stage4"] , UpperCamelCase=[2, 3, 4] , UpperCamelCase=None , ): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = num_channels lowerCamelCase__ = num_stages lowerCamelCase__ = hidden_sizes lowerCamelCase__ = depths lowerCamelCase__ = is_training lowerCamelCase__ = use_labels lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = num_labels lowerCamelCase__ = initializer_range lowerCamelCase__ = out_features lowerCamelCase__ = out_indices lowerCamelCase__ = scope def __UpperCAmelCase ( self): lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_labels) lowerCamelCase__ = self.get_config() return config, pixel_values, labels def __UpperCAmelCase ( self): return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def __UpperCAmelCase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase): lowerCamelCase__ = ConvNextVaModel(config=UpperCamelCase) model.to(UpperCamelCase) model.eval() lowerCamelCase__ = model(UpperCamelCase) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __UpperCAmelCase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase): lowerCamelCase__ = ConvNextVaForImageClassification(UpperCamelCase) model.to(UpperCamelCase) model.eval() lowerCamelCase__ = model(UpperCamelCase , labels=UpperCamelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def __UpperCAmelCase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase): lowerCamelCase__ = ConvNextVaBackbone(config=UpperCamelCase) model.to(UpperCamelCase) model.eval() lowerCamelCase__ = model(UpperCamelCase) # 
verify hidden states self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:]) # verify backbone works with out_features=None lowerCamelCase__ = None lowerCamelCase__ = ConvNextVaBackbone(config=UpperCamelCase) model.to(UpperCamelCase) model.eval() lowerCamelCase__ = model(UpperCamelCase) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , 1) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1]) # verify channels self.parent.assertEqual(len(model.channels) , 1) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]]) def __UpperCAmelCase ( self): lowerCamelCase__ = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs lowerCamelCase__ = {"pixel_values": pixel_values} return config, inputs_dict def __UpperCAmelCase ( self): lowerCamelCase__ = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs lowerCamelCase__ = {"pixel_values": pixel_values, "labels": labels} return config, inputs_dict @require_torch class snake_case_ ( A__ , A__ , unittest.TestCase ): """simple docstring""" __lowerCAmelCase : Optional[Any] =( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) __lowerCAmelCase : Optional[int] =( {'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification} if is_torch_available() else {} ) __lowerCAmelCase : Dict =False __lowerCAmelCase : List[Any] =False __lowerCAmelCase : Union[str, Any] =False __lowerCAmelCase : Tuple =False __lowerCAmelCase : Tuple =False def __UpperCAmelCase ( self): lowerCamelCase__ = ConvNextVaModelTester(self) lowerCamelCase__ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37) def __UpperCAmelCase ( self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __UpperCAmelCase ( self): return @unittest.skip(reason="ConvNextV2 does not use inputs_embeds") def __UpperCAmelCase ( self): pass @unittest.skip(reason="ConvNextV2 does not support input and output embeddings") def __UpperCAmelCase ( self): pass @unittest.skip(reason="ConvNextV2 does not use feedforward chunking") def __UpperCAmelCase ( self): pass def __UpperCAmelCase ( self): if not self.model_tester.is_training: return for model_class in self.all_model_classes: lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_with_labels() lowerCamelCase__ = True if model_class.__name__ in [ *get_values(UpperCamelCase), *get_values(UpperCamelCase), ]: continue lowerCamelCase__ = model_class(UpperCamelCase) model.to(UpperCamelCase) model.train() lowerCamelCase__ = self._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase) lowerCamelCase__ = model(**UpperCamelCase).loss 
loss.backward() def __UpperCAmelCase ( self): if not self.model_tester.is_training: return for model_class in self.all_model_classes: lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_with_labels() lowerCamelCase__ = False lowerCamelCase__ = True if ( model_class.__name__ in [*get_values(UpperCamelCase), *get_values(UpperCamelCase)] or not model_class.supports_gradient_checkpointing ): continue lowerCamelCase__ = model_class(UpperCamelCase) model.to(UpperCamelCase) model.gradient_checkpointing_enable() model.train() lowerCamelCase__ = self._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase) lowerCamelCase__ = model(**UpperCamelCase).loss loss.backward() def __UpperCAmelCase ( self): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(UpperCamelCase) lowerCamelCase__ = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase) def __UpperCAmelCase ( self): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase) def __UpperCAmelCase ( self): def check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase): lowerCamelCase__ = model_class(UpperCamelCase) model.to(UpperCamelCase) model.eval() with torch.no_grad(): lowerCamelCase__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase)) lowerCamelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase__ = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase) , expected_num_stages + 1) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase) def __UpperCAmelCase ( self): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase) @slow def __UpperCAmelCase ( self): for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = ConvNextVaModel.from_pretrained(UpperCamelCase) self.assertIsNotNone(UpperCamelCase) def lowerCAmelCase( ): '''simple docstring''' lowerCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class snake_case_ ( unittest.TestCase ): """simple docstring""" @cached_property def __UpperCAmelCase ( self): return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None @slow def __UpperCAmelCase ( self): lowerCamelCase__ = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(UpperCamelCase) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() 
lowerCamelCase__ = preprocessor(images=UpperCamelCase , return_tensors="pt").to(UpperCamelCase) # forward pass with torch.no_grad(): lowerCamelCase__ = model(**UpperCamelCase) # verify the logits lowerCamelCase__ = torch.Size((1, 10_00)) self.assertEqual(outputs.logits.shape , UpperCamelCase) lowerCamelCase__ = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6]).to(UpperCamelCase) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1E-4))
426
1
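The matrix row above is Project Euler 82: start anywhere in the left column, end anywhere in the right column, moving up, down, or right, and minimize the path sum. The column-by-column dynamic program is clearer with the two relaxation sweeps named; one down sweep plus one up sweep per column suffices because, with non-negative entries (assumed here), an optimal path never reverses direction within a column:

def minimal_path_sum(matrix: list) -> int:
    rows = len(matrix)
    best = [row[0] for row in matrix]  # best cost of a path ending in column 0
    for j in range(1, len(matrix[0])):
        new = [best[i] + matrix[i][j] for i in range(rows)]  # enter from the left
        for i in range(1, rows):  # sweep down: allow arriving from above
            new[i] = min(new[i], new[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):  # sweep up: allow arriving from below
            new[i] = min(new[i], new[i + 1] + matrix[i][j])
        best = new
    return min(best)

demo = [
    [131, 673, 234, 103, 18],
    [201, 96, 342, 965, 150],
    [630, 803, 746, 422, 111],
    [537, 699, 497, 121, 956],
    [805, 732, 524, 37, 331],
]
print(minimal_path_sum(demo))  # 994, the example answer from the problem statement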
'''simple docstring''' import os # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_doctest_list.py UpperCAmelCase_ : List[Any] = '.' if __name__ == "__main__": UpperCAmelCase_ : Any = os.path.join(REPO_PATH, 'utils/documentation_tests.txt') UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : Union[str, Any] = [] with open(doctest_file_path) as fp: for line in fp: UpperCAmelCase_ : int = line.strip() UpperCAmelCase_ : str = os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(path) if len(non_existent_paths) > 0: UpperCAmelCase_ : int = '\n'.join(non_existent_paths) raise ValueError(F"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}") if all_paths != sorted(all_paths): raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
533
'''simple docstring''' import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class lowercase__ : '''simple docstring''' def __init__( self , __snake_case , __snake_case=13 , __snake_case=32 , __snake_case=2 , __snake_case=3 , __snake_case=16 , __snake_case=[1, 2, 1] , __snake_case=[2, 2, 4] , __snake_case=2 , __snake_case=2.0 , __snake_case=True , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.1 , __snake_case="gelu" , __snake_case=False , __snake_case=True , __snake_case=0.02 , __snake_case=1e-5 , __snake_case=True , __snake_case=None , __snake_case=True , __snake_case=10 , __snake_case=8 , __snake_case=["stage1", "stage2", "stage3"] , __snake_case=[1, 2, 3] , ): _SCREAMING_SNAKE_CASE : List[Any] = parent _SCREAMING_SNAKE_CASE : List[str] = batch_size _SCREAMING_SNAKE_CASE : str = image_size _SCREAMING_SNAKE_CASE : Any = patch_size _SCREAMING_SNAKE_CASE : int = num_channels _SCREAMING_SNAKE_CASE : Optional[int] = embed_dim _SCREAMING_SNAKE_CASE : List[str] = depths _SCREAMING_SNAKE_CASE : Optional[Any] = num_heads _SCREAMING_SNAKE_CASE : Dict = window_size _SCREAMING_SNAKE_CASE : int = mlp_ratio _SCREAMING_SNAKE_CASE : Optional[Any] = qkv_bias _SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob _SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : str = drop_path_rate _SCREAMING_SNAKE_CASE : List[Any] = hidden_act _SCREAMING_SNAKE_CASE : List[str] = use_absolute_embeddings _SCREAMING_SNAKE_CASE : Optional[int] = patch_norm _SCREAMING_SNAKE_CASE : str = layer_norm_eps _SCREAMING_SNAKE_CASE : List[Any] = initializer_range _SCREAMING_SNAKE_CASE : Union[str, Any] = is_training _SCREAMING_SNAKE_CASE : Tuple = scope _SCREAMING_SNAKE_CASE : Optional[Any] = use_labels _SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size _SCREAMING_SNAKE_CASE : Tuple = encoder_stride _SCREAMING_SNAKE_CASE : Dict = out_features _SCREAMING_SNAKE_CASE : Optional[Any] = out_indices def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _SCREAMING_SNAKE_CASE : Any = None if self.use_labels: _SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _SCREAMING_SNAKE_CASE : List[str] = self.get_config() return config, pixel_values, labels def UpperCAmelCase_ ( self ): return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , 
layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def UpperCAmelCase_ ( self , __snake_case , __snake_case , __snake_case ): _SCREAMING_SNAKE_CASE : Any = MaskFormerSwinModel(config=__snake_case ) model.to(__snake_case ) model.eval() _SCREAMING_SNAKE_CASE : Any = model(__snake_case ) _SCREAMING_SNAKE_CASE : List[str] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _SCREAMING_SNAKE_CASE : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def UpperCAmelCase_ ( self , __snake_case , __snake_case , __snake_case ): _SCREAMING_SNAKE_CASE : str = MaskFormerSwinBackbone(config=__snake_case ) model.to(__snake_case ) model.eval() _SCREAMING_SNAKE_CASE : Optional[int] = model(__snake_case ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(__snake_case ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["""stem"""] _SCREAMING_SNAKE_CASE : Any = MaskFormerSwinBackbone(config=__snake_case ) def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = config_and_inputs _SCREAMING_SNAKE_CASE : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase__ ( _snake_case , _snake_case , unittest.TestCase ): '''simple docstring''' A_ : Any = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) A_ : List[Any] = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {} A_ : Optional[Any] = False A_ : Dict = False A_ : Optional[int] = False A_ : Union[str, Any] = False A_ : Union[str, Any] = False def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE : str = MaskFormerSwinModelTester(self ) _SCREAMING_SNAKE_CASE : Tuple = ConfigTester(self , config_class=__snake_case , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with""" """ `nn.DataParallel`""" ) ) def UpperCAmelCase_ ( self ): pass def UpperCAmelCase_ ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase_ ( self ): return def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__snake_case ) @unittest.skip("""Swin does not use inputs_embeds""" ) def UpperCAmelCase_ ( self ): 
pass @unittest.skip("""Swin does not support feedforward chunking""" ) def UpperCAmelCase_ ( self ): pass def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : Dict = model_class(__snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _SCREAMING_SNAKE_CASE : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) ) def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : Optional[int] = model_class(__snake_case ) _SCREAMING_SNAKE_CASE : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _SCREAMING_SNAKE_CASE : Optional[int] = [*signature.parameters.keys()] _SCREAMING_SNAKE_CASE : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __snake_case ) @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" ) def UpperCAmelCase_ ( self ): pass @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" ) def UpperCAmelCase_ ( self ): pass def UpperCAmelCase_ ( self , __snake_case , __snake_case , __snake_case , __snake_case ): _SCREAMING_SNAKE_CASE : List[str] = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): _SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(__snake_case , __snake_case ) ) _SCREAMING_SNAKE_CASE : str = outputs.hidden_states _SCREAMING_SNAKE_CASE : Optional[int] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__snake_case ) , __snake_case ) # Swin has a different seq_length _SCREAMING_SNAKE_CASE : Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _SCREAMING_SNAKE_CASE : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : Union[str, Any] = True self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , __snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _SCREAMING_SNAKE_CASE : List[str] = True self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , __snake_case ) def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE : str = 3 _SCREAMING_SNAKE_CASE : List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _SCREAMING_SNAKE_CASE : Dict = ( config.patch_size if 
isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _SCREAMING_SNAKE_CASE : Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _SCREAMING_SNAKE_CASE : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : List[str] = True self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _SCREAMING_SNAKE_CASE : Dict = True self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , (padded_height, padded_width) ) @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" ) def UpperCAmelCase_ ( self ): pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def UpperCAmelCase_ ( self ): pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def UpperCAmelCase_ ( self ): pass def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(__snake_case ): _SCREAMING_SNAKE_CASE : Optional[int] = 0 return t def check_equivalence(__snake_case , __snake_case , __snake_case , __snake_case={} ): with torch.no_grad(): _SCREAMING_SNAKE_CASE : Dict = model(**__snake_case , return_dict=__snake_case , **__snake_case ) _SCREAMING_SNAKE_CASE : Optional[int] = model(**__snake_case , return_dict=__snake_case , **__snake_case ).to_tuple() def recursive_check(__snake_case , __snake_case ): if isinstance(__snake_case , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(__snake_case , __snake_case ): recursive_check(__snake_case , __snake_case ) elif isinstance(__snake_case , __snake_case ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(__snake_case , __snake_case ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(__snake_case ) , set_nan_tensor_to_zero(__snake_case ) , atol=1e-5 ) , msg=( """Tuple and dict output are not equal. Difference:""" f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:""" f""" {torch.isnan(__snake_case ).any()} and `inf`: {torch.isinf(__snake_case )}. 
Dict has""" f""" `nan`: {torch.isnan(__snake_case ).any()} and `inf`: {torch.isinf(__snake_case )}.""" ) , ) recursive_check(__snake_case , __snake_case ) for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(__snake_case ) model.to(__snake_case ) model.eval() _SCREAMING_SNAKE_CASE : str = self._prepare_for_class(__snake_case , __snake_case ) _SCREAMING_SNAKE_CASE : str = self._prepare_for_class(__snake_case , __snake_case ) check_equivalence(__snake_case , __snake_case , __snake_case ) _SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case ) _SCREAMING_SNAKE_CASE : int = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case ) check_equivalence(__snake_case , __snake_case , __snake_case ) _SCREAMING_SNAKE_CASE : List[str] = self._prepare_for_class(__snake_case , __snake_case ) _SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_for_class(__snake_case , __snake_case ) check_equivalence(__snake_case , __snake_case , __snake_case , {"""output_hidden_states""": True} ) _SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case ) _SCREAMING_SNAKE_CASE : Optional[Any] = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case ) check_equivalence(__snake_case , __snake_case , __snake_case , {"""output_hidden_states""": True} ) @require_torch class lowercase__ ( unittest.TestCase , _snake_case ): '''simple docstring''' A_ : Any = (MaskFormerSwinBackbone,) if is_torch_available() else () A_ : List[Any] = MaskFormerSwinConfig def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE : Optional[int] = MaskFormerSwinModelTester(self ) def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE : Optional[int] = inputs_dict["""pixel_values"""].shape[0] for backbone_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : List[Any] = backbone_class(__snake_case ) backbone.to(__snake_case ) backbone.eval() _SCREAMING_SNAKE_CASE : List[str] = backbone(**__snake_case ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , __snake_case ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True _SCREAMING_SNAKE_CASE : Optional[Any] = backbone(**__snake_case , output_hidden_states=__snake_case ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: _SCREAMING_SNAKE_CASE : Optional[int] = backbone(**__snake_case , output_attentions=__snake_case ) self.assertIsNotNone(outputs.attentions )
533
1
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters of all almost-equilateral triangles (sides a, a, a +/- 1)
    with integral side lengths and integral area, up to max_perimeter."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
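As a sanity check, the closed-form generator above can be compared against a direct search over isosceles triangles. The brute-force helper below is an illustrative addition, not part of the original solution; it uses the identity 16 * area^2 = p * c^2 * (2a - c) for a triangle (a, a, c) with perimeter p = 2a + c.

import math


def brute_force(max_perimeter: int) -> int:
    # The area is integral exactly when 16 * area^2 is a perfect square whose
    # root is positive and divisible by 4.
    total = 0
    for a in range(2, max_perimeter // 3 + 2):
        for c in (a - 1, a + 1):
            p = 2 * a + c
            if c <= 0 or p > max_perimeter:
                continue
            sixteen_area_sq = p * c * c * (2 * a - c)
            root = math.isqrt(sixteen_area_sq)
            if root > 0 and root * root == sixteen_area_sq and root % 4 == 0:
                total += p
    return total


# Triangles (5,5,6), (17,17,16), (65,65,66), (241,241,240): 16 + 50 + 196 + 722.
assert brute_force(1_000) == solution(1_000) == 984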
700
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune in place so os.walk skips scripts/ plus hidden and private dirs.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
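A usage sketch (the output file name is an assumption): the generator prints to stdout, so redirecting it is enough to materialize the index file.

import contextlib

with open("DIRECTORY.md", "w") as stream, contextlib.redirect_stdout(stream):
    print_directory_md(".")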
444
0
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """Count the triangle words in words.txt: words whose letter values
    (A=1, ..., Z=26) sum to a triangular number."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
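Worked example for the scoring rule above: "SKY" maps to 19 + 11 + 25 = 55, the tenth triangular number, so it counts as a triangle word.

assert sum(ord(letter) - 64 for letter in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS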
28
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    # Weight norm must be applied first so the weight_g / weight_v params exist.
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
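A hypothetical invocation of the converter (all paths, the script name, and the repo name are placeholders):

# python convert_hifigan.py \
#     --checkpoint_path /path/to/original/generator.ckpt \
#     --stats_path /path/to/stats.npy \
#     --pytorch_dump_folder_path ./speecht5_hifigan \
#     --push_to_hub your-username/speecht5-hifigan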
28
1
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
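Illustrative effect of the lazy init above (the checkpoint name is an example, nothing in this file mandates it): the submodule is only imported on first attribute access, keeping package import fast.

# from transformers import Wav2Vec2ProcessorWithLM  # triggers the lazy import
# processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")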
286
import inspect
import unittest


class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
286
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
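An illustrative sanity check appended here (not part of the upstream file): with the defaults above, a 16x16 patchifying convolution with strides of 10 yields 12 frequency positions and 101 time positions, i.e. 1212 patches.

_cfg = ASTConfig()
_freq_positions = (_cfg.num_mel_bins - _cfg.patch_size) // _cfg.frequency_stride + 1
_time_positions = (_cfg.max_length - _cfg.patch_size) // _cfg.time_stride + 1
assert (_freq_positions, _time_positions) == (12, 101)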
484
import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" def __A ( self: Tuple ) -> Dict: _A = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__A , '''width_multiplier''' ) ) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Dict , __A: Optional[Any] , __A: Optional[int]=13 , __A: Union[str, Any]=64 , __A: Dict=2 , __A: str=3 , __A: Dict="swish" , __A: List[str]=3 , __A: Union[str, Any]=32 , __A: str=0.1 , __A: int=0.02 , __A: Optional[Any]=True , __A: str=True , __A: List[Any]=10 , __A: Dict=None , __A: Optional[Any]=0.25 , __A: Optional[int]=0.0 , __A: Tuple=0.0 , ) -> Optional[int]: _A = parent _A = batch_size _A = image_size _A = patch_size _A = num_channels _A = make_divisible(5_12 * width_multiplier , divisor=8 ) _A = hidden_act _A = conv_kernel_size _A = output_stride _A = classifier_dropout_prob _A = use_labels _A = is_training _A = num_labels _A = initializer_range _A = scope _A = width_multiplier _A = ffn_dropout _A = attn_dropout def __A ( self: Dict ) -> List[str]: _A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.num_labels ) _A = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _A = self.get_config() return config, pixel_values, labels, pixel_labels def __A ( self: Tuple ) -> Optional[Any]: return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def __A ( self: Dict , __A: Union[str, Any] , __A: int , __A: Dict , __A: List[str] ) -> str: _A = MobileViTVaModel(config=__A ) model.to(__A ) model.eval() _A = model(__A ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __A ( self: str , __A: int , __A: Optional[Any] , __A: int , __A: Tuple ) -> Any: _A = self.num_labels _A = MobileViTVaForImageClassification(__A ) model.to(__A ) model.eval() _A = model(__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self: List[Any] , __A: Optional[Any] , __A: Tuple , __A: int , __A: List[Any] ) -> Optional[Any]: _A = self.num_labels _A = MobileViTVaForSemanticSegmentation(__A ) model.to(__A ) 
model.eval() _A = model(__A ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) _A = model(__A , labels=__A ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __A ( self: Dict ) -> List[Any]: _A = self.prepare_config_and_inputs() _A ,_A ,_A ,_A = config_and_inputs _A = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) A_ = ( { "feature-extraction": MobileViTVaModel, "image-classification": MobileViTVaForImageClassification, "image-segmentation": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) A_ = False A_ = False A_ = False A_ = False def __A ( self: str ) -> Optional[Any]: _A = MobileViTVaModelTester(self ) _A = MobileViTVaConfigTester(self , config_class=__A , has_text_modality=__A ) def __A ( self: Any ) -> Union[str, Any]: self.config_tester.run_common_tests() @unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' ) def __A ( self: Any ) -> List[str]: pass @unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' ) def __A ( self: int ) -> Any: pass @unittest.skip(reason='''MobileViTV2 does not output attentions''' ) def __A ( self: Optional[Any] ) -> Optional[Any]: pass @require_torch_multi_gpu @unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' ) def __A ( self: Any ) -> Any: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __A ( self: Optional[int] ) -> List[str]: pass def __A ( self: List[Any] ) -> Optional[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __A ) def __A ( self: List[str] ) -> int: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __A ( self: str ) -> int: def check_hidden_states_output(__A: List[str] , __A: str , __A: Optional[int] ): _A = model_class(__A ) model.to(__A ) model.eval() with torch.no_grad(): _A = model(**self._prepare_for_class(__A , __A ) ) _A = outputs.hidden_states _A = 5 self.assertEqual(len(__A ) , __A ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. 
_A = 2 for i in range(len(__A ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = True check_hidden_states_output(__A , __A , __A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A = True check_hidden_states_output(__A , __A , __A ) def __A ( self: str ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) def __A ( self: int ) -> Tuple: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__A ) @slow def __A ( self: Dict ) -> Optional[Any]: for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = MobileViTVaModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def __A ( ): '''simple docstring''' _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self: int ) -> Optional[Any]: return ( MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ) if is_vision_available() else None ) @slow def __A ( self: Optional[Any] ) -> Optional[int]: _A = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to( __A ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(images=__A , return_tensors='''pt''' ).to(__A ) # forward pass with torch.no_grad(): _A = model(**__A ) # verify the logits _A = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __A ) _A = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ).to(__A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1e-4 ) ) @slow def __A ( self: List[str] ) -> Tuple: _A = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) _A = model.to(__A ) _A = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) _A = prepare_img() _A = image_processor(images=__A , return_tensors='''pt''' ).to(__A ) # forward pass with torch.no_grad(): _A = model(**__A ) _A = outputs.logits # verify the logits _A = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , __A ) _A = torch.tensor( [ [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]], [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]], [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]], ] , device=__A , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __A , atol=1e-4 ) ) @slow def __A ( self: List[Any] ) -> Optional[int]: _A = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) _A = model.to(__A ) _A = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) _A = prepare_img() _A = image_processor(images=__A , return_tensors='''pt''' ).to(__A ) # forward pass with torch.no_grad(): _A = model(**__A ) _A = outputs.logits.detach().cpu() _A = image_processor.post_process_semantic_segmentation(outputs=__A , target_sizes=[(50, 60)] ) _A = torch.Size((50, 60) ) 
self.assertEqual(segmentation[0].shape , __A ) _A = image_processor.post_process_semantic_segmentation(outputs=__A ) _A = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , __A )
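The tester above sizes channels with make_divisible; a minimal sketch of that helper's usual behavior follows. This is an assumption based on its usage here, and the library's own implementation may differ in detail.

from typing import Optional


def make_divisible_sketch(value: float, divisor: int = 8, min_value: Optional[int] = None) -> int:
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # do not shrink the channel count by more than 10%
        new_value += divisor
    return new_value


assert make_divisible_sketch(512 * 0.25, divisor=8) == 128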
484
1
"""simple docstring""" import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowerCamelCase (a_ :List[str] , a_ :Any , a_ :Any , a_ :str , a_ :Optional[Any]) -> Optional[Any]: # Load configuration defined in the metadata file with open(a_) as metadata_file: lowercase :List[str] = json.load(a_) lowercase :Optional[Any] = LukeConfig(use_entity_aware_attention=a_ , **metadata['''model_config''']) # Load in the weights from the checkpoint_path lowercase :Dict = torch.load(a_ , map_location='''cpu''') # Load the entity vocab file lowercase :Dict = load_entity_vocab(a_) lowercase :int = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name''']) # Add special tokens to the token vocabulary for downstream tasks lowercase :List[str] = AddedToken('''<ent>''' , lstrip=a_ , rstrip=a_) lowercase :Optional[Any] = AddedToken('''<ent2>''' , lstrip=a_ , rstrip=a_) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]}) config.vocab_size += 2 print(F"""Saving tokenizer to {pytorch_dump_folder_path}""") tokenizer.save_pretrained(a_) with open(os.path.join(a_ , LukeTokenizer.vocab_files_names['''entity_vocab_file''']) , '''w''') as f: json.dump(a_ , a_) lowercase :Optional[Any] = LukeTokenizer.from_pretrained(a_) # Initialize the embeddings of the special tokens lowercase :str = state_dict['''embeddings.word_embeddings.weight'''] lowercase :int = word_emb[tokenizer.convert_tokens_to_ids(['''@'''])[0]].unsqueeze(0) lowercase :Optional[int] = word_emb[tokenizer.convert_tokens_to_ids(['''#'''])[0]].unsqueeze(0) lowercase :Dict = torch.cat([word_emb, ent_emb, enta_emb]) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers): for matrix_name in ["query.weight", "query.bias"]: lowercase :Tuple = F"""encoder.layer.{layer_index}.attention.self.""" lowercase :int = state_dict[prefix + matrix_name] lowercase :Dict = state_dict[prefix + matrix_name] lowercase :int = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks lowercase :Any = state_dict['''entity_embeddings.entity_embeddings.weight'''] lowercase :Tuple = entity_emb[entity_vocab['''[MASK]''']] lowercase :Union[str, Any] = LukeModel(config=a_).eval() lowercase :List[str] = model.load_state_dict(a_ , strict=a_) if not (len(a_) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(F"""Missing keys {', '.join(a_)}. 
Expected only missing embeddings.position_ids""") if not (all(key.startswith('''entity_predictions''') or key.startswith('''lm_head''') for key in unexpected_keys)): raise ValueError( '''Unexpected keys''' F""" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}""") # Check outputs lowercase :str = LukeTokenizer.from_pretrained(a_ , task='''entity_classification''') lowercase :str = ( '''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the''' ''' new world number one avoid a humiliating second- round exit at Wimbledon .''' ) lowercase :str = (39, 42) lowercase :Any = tokenizer(a_ , entity_spans=[span] , add_prefix_space=a_ , return_tensors='''pt''') lowercase :List[Any] = model(**a_) # Verify word hidden states if model_size == "large": lowercase :str = torch.Size((1, 42, 1024)) lowercase :Any = torch.tensor( [[0.01_33, 0.08_65, 0.00_95], [0.30_93, -0.25_76, -0.74_18], [-0.17_20, -0.21_17, -0.28_69]]) else: # base lowercase :Union[str, Any] = torch.Size((1, 42, 768)) lowercase :Tuple = torch.tensor([[0.00_37, 0.13_68, -0.00_91], [0.10_99, 0.33_29, -0.10_95], [0.07_65, 0.53_35, 0.11_79]]) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""") if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1E-4): raise ValueError # Verify entity hidden states if model_size == "large": lowercase :Union[str, Any] = torch.Size((1, 1, 1024)) lowercase :Any = torch.tensor([[0.04_66, -0.01_06, -0.01_79]]) else: # base lowercase :str = torch.Size((1, 1, 768)) lowercase :int = torch.tensor([[0.14_57, 0.10_44, 0.01_74]]) if not (outputs.entity_last_hidden_state.shape != expected_shape): raise ValueError( F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is""" F""" {expected_shape}""") if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , a_ , atol=1E-4): raise ValueError # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(a_)) model.save_pretrained(a_) def lowerCamelCase (a_ :Union[str, Any]) -> Any: lowercase :Union[str, Any] = {} with open(a_ , '''r''' , encoding='''utf-8''') as f: for index, line in enumerate(a_): lowercase :str = line.rstrip().split('''\t''') lowercase :Tuple = index return entity_vocab if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''') parser.add_argument( '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.''' ) parser.add_argument( '''--entity_vocab_path''', default=None, type=str, help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.''' ) parser.add_argument( '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.''' ) UpperCAmelCase = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
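A hypothetical conversion run for the script above (the script name and all file locations are placeholders):

# python convert_luke_checkpoint.py \
#     --checkpoint_path ./luke_large/pytorch_model.bin \
#     --metadata_path ./luke_large/metadata.json \
#     --entity_vocab_path ./luke_large/entity_vocab.tsv \
#     --pytorch_dump_folder_path ./converted_luke \
#     --model_size large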
713
"""simple docstring""" UpperCAmelCase = {str(digit): digit**5 for digit in range(10)} def lowerCamelCase (a_ :int) -> int: return sum(DIGITS_FIFTH_POWER[digit] for digit in str(a_)) def lowerCamelCase () -> int: return sum( number for number in range(1000 , 100_0000) if number == digits_fifth_powers_sum(a_)) if __name__ == "__main__": print(solution())
475
0
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Union[str, Any] , a__ : str , ): UpperCAmelCase = parent UpperCAmelCase = 13 UpperCAmelCase = 7 UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = 2 UpperCAmelCase = 99 UpperCAmelCase = 0 UpperCAmelCase = 32 UpperCAmelCase = 2 UpperCAmelCase = 4 UpperCAmelCase = 0.1 UpperCAmelCase = 0.1 UpperCAmelCase = 512 UpperCAmelCase = 16 UpperCAmelCase = 2 UpperCAmelCase = 0.02 UpperCAmelCase = 3 UpperCAmelCase = 4 UpperCAmelCase = '''last''' UpperCAmelCase = True UpperCAmelCase = None UpperCAmelCase = 0 def __snake_case ( self : str ): UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) UpperCAmelCase = None if self.use_input_lengths: UpperCAmelCase = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length UpperCAmelCase = None if self.use_token_type_ids: UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __snake_case ( self : Dict , a__ : Optional[Any] , a__ : Union[str, Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : Optional[int] , a__ : int , a__ : Tuple , a__ : Optional[Any] , a__ : Union[str, Any] , ): UpperCAmelCase = TFFlaubertModel(config=a__ ) UpperCAmelCase = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids} UpperCAmelCase = model(a__ ) UpperCAmelCase = [input_ids, input_mask] 
UpperCAmelCase = model(a__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __snake_case ( self : str , a__ : Tuple , a__ : Tuple , a__ : Optional[int] , a__ : str , a__ : Any , a__ : List[str] , a__ : str , a__ : List[str] , a__ : int , ): UpperCAmelCase = TFFlaubertWithLMHeadModel(a__ ) UpperCAmelCase = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids} UpperCAmelCase = model(a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __snake_case ( self : List[Any] , a__ : Optional[int] , a__ : List[Any] , a__ : Union[str, Any] , a__ : Optional[Any] , a__ : List[Any] , a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : int , a__ : int , ): UpperCAmelCase = TFFlaubertForQuestionAnsweringSimple(a__ ) UpperCAmelCase = {'''input_ids''': input_ids, '''lengths''': input_lengths} UpperCAmelCase = model(a__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __snake_case ( self : Union[str, Any] , a__ : Optional[int] , a__ : Optional[Any] , a__ : Tuple , a__ : Dict , a__ : Dict , a__ : List[Any] , a__ : List[Any] , a__ : List[str] , a__ : Union[str, Any] , ): UpperCAmelCase = TFFlaubertForSequenceClassification(a__ ) UpperCAmelCase = {'''input_ids''': input_ids, '''lengths''': input_lengths} UpperCAmelCase = model(a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __snake_case ( self : Any , a__ : Optional[int] , a__ : Dict , a__ : Any , a__ : Optional[Any] , a__ : str , a__ : Optional[int] , a__ : Any , a__ : Any , a__ : List[Any] , ): UpperCAmelCase = self.num_labels UpperCAmelCase = TFFlaubertForTokenClassification(config=a__ ) UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCAmelCase = model(a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __snake_case ( self : Union[str, Any] , a__ : Optional[Any] , a__ : Any , a__ : Any , a__ : Optional[int] , a__ : str , a__ : Union[str, Any] , a__ : Optional[Any] , a__ : Tuple , a__ : Tuple , ): UpperCAmelCase = self.num_choices UpperCAmelCase = TFFlaubertForMultipleChoice(config=a__ ) UpperCAmelCase = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } UpperCAmelCase = model(a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __snake_case ( self : int ): UpperCAmelCase = self.prepare_config_and_inputs() ( ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ( UpperCAmelCase ), ) = config_and_inputs UpperCAmelCase = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''langs''': token_type_ids, '''lengths''': input_lengths, } return config, inputs_dict @require_tf class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' _lowerCamelCase =( ( 
TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) _lowerCamelCase =( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _lowerCamelCase =( { "feature-extraction": TFFlaubertModel, "fill-mask": TFFlaubertWithLMHeadModel, "question-answering": TFFlaubertForQuestionAnsweringSimple, "text-classification": TFFlaubertForSequenceClassification, "token-classification": TFFlaubertForTokenClassification, "zero-shot": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) _lowerCamelCase =False _lowerCamelCase =False def __snake_case ( self : Union[str, Any] , a__ : Optional[Any] , a__ : Union[str, Any] , a__ : Optional[Any] , a__ : str , a__ : Optional[int] ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __snake_case ( self : int ): UpperCAmelCase = TFFlaubertModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=a__ , emb_dim=37 ) def __snake_case ( self : Optional[int] ): self.config_tester.run_common_tests() def __snake_case ( self : List[str] ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*a__ ) def __snake_case ( self : List[Any] ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*a__ ) def __snake_case ( self : List[Any] ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*a__ ) def __snake_case ( self : Tuple ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*a__ ) def __snake_case ( self : List[str] ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*a__ ) def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*a__ ) @slow def __snake_case ( self : Dict ): for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase = TFFlaubertModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) @require_tf @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' ) UpperCAmelCase = tf.convert_to_tensor( [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" UpperCAmelCase = model(a__ )[0] UpperCAmelCase = tf.TensorShape((1, 8, 512) ) self.assertEqual(output.shape , a__ ) # compare the actual values for a slice. 
UpperCAmelCase = tf.convert_to_tensor( [ [ [-1.8_768_773, -1.566_555, 0.27_072_418], [-1.6_920_038, -0.5_873_505, 1.9_329_599], [-2.9_563_985, -1.6_993_835, 1.7_972_052], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
51
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # The first element is the first available tensor, e.g. last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
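A usage sketch (the checkpoint name is an example, nothing in this file mandates it):

# from transformers import pipeline
# extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
# features = extractor("This is a test.")  # nested lists: [tokens][hidden_size]
# first_token_vector = features[0][0]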
28
0
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a :str = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a :Tuple = [ "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FocalNetForImageClassification", "FocalNetForMaskedImageModeling", "FocalNetBackbone", "FocalNetModel", "FocalNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys a :Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
12
"""simple docstring""" import os a :List[str] = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000} def _lowercase ( __lowerCAmelCase ) -> int: SCREAMING_SNAKE_CASE__ : Any = 0 SCREAMING_SNAKE_CASE__ : Dict = 0 while index < len(__lowerCAmelCase ) - 1: SCREAMING_SNAKE_CASE__ : List[Any] = SYMBOLS[numerals[index]] SCREAMING_SNAKE_CASE__ : Dict = SYMBOLS[numerals[index + 1]] if current_value < next_value: total_value -= current_value else: total_value += current_value index += 1 total_value += SYMBOLS[numerals[index]] return total_value def _lowercase ( __lowerCAmelCase ) -> str: SCREAMING_SNAKE_CASE__ : Optional[int] = """""" SCREAMING_SNAKE_CASE__ : int = num // 1000 numerals += m_count * "M" num %= 1000 SCREAMING_SNAKE_CASE__ : List[str] = num // 100 if c_count == 9: numerals += "CM" c_count -= 9 elif c_count == 4: numerals += "CD" c_count -= 4 if c_count >= 5: numerals += "D" c_count -= 5 numerals += c_count * "C" num %= 100 SCREAMING_SNAKE_CASE__ : List[Any] = num // 10 if x_count == 9: numerals += "XC" x_count -= 9 elif x_count == 4: numerals += "XL" x_count -= 4 if x_count >= 5: numerals += "L" x_count -= 5 numerals += x_count * "X" num %= 10 if num == 9: numerals += "IX" num -= 9 elif num == 4: numerals += "IV" num -= 4 if num >= 5: numerals += "V" num -= 5 numerals += num * "I" return numerals def _lowercase ( __lowerCAmelCase = "/p089_roman.txt" ) -> int: SCREAMING_SNAKE_CASE__ : int = 0 with open(os.path.dirname(__lowerCAmelCase ) + roman_numerals_filename ) as filea: SCREAMING_SNAKE_CASE__ : str = filea.readlines() for line in lines: SCREAMING_SNAKE_CASE__ : Union[str, Any] = line.strip() SCREAMING_SNAKE_CASE__ : Dict = parse_roman_numerals(__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : str = generate_roman_numerals(__lowerCAmelCase ) savings += len(__lowerCAmelCase ) - len(__lowerCAmelCase ) return savings if __name__ == "__main__": print(f'{solution() = }')
12
1
"""simple docstring""" import json import sys def __a ( A , A ) -> Any: '''simple docstring''' with open(A , encoding="utf-8" ) as f: A__ = json.load(A ) A__ = ["<details>", "<summary>Show updated benchmarks!</summary>", " "] for benchmark_name in sorted(A ): A__ = results[benchmark_name] A__ = benchmark_name.split("/" )[-1] output_md.append(f"""### Benchmark: {benchmark_file_name}""" ) A__ = "| metric |" A__ = "|--------|" A__ = "| new / old (diff) |" for metric_name in sorted(A ): A__ = benchmark_res[metric_name] A__ = metric_vals["new"] A__ = metric_vals.get("old" , A ) A__ = metric_vals.get("diff" , A ) A__ = f""" {new_val:f}""" if isinstance(A , (int, float) ) else "None" if old_val is not None: val_str += f""" / {old_val:f}""" if isinstance(A , (int, float) ) else "None" if dif_val is not None: val_str += f""" ({dif_val:f})""" if isinstance(A , (int, float) ) else "None" title += " " + metric_name + " |" lines += "---|" value += val_str + " |" output_md += [title, lines, value, " "] output_md.append("</details>" ) with open(A , "w" , encoding="utf-8" ) as f: f.writelines("\n".join(A ) ) if __name__ == "__main__": __UpperCAmelCase =sys.argv[1] __UpperCAmelCase =sys.argv[2] format_json_to_md(input_json_file, output_md_file)
337
"""simple docstring""" import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class lowerCAmelCase__ : def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__="gelu" , UpperCamelCase__=0.0 , UpperCamelCase__=0.1 , UpperCamelCase__=True , UpperCamelCase__=5_12 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , ): '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_multiple_size A__ = hidden_act A__ = hidden_dropout A__ = attention_dropout A__ = weight_tying A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_labels A__ = num_choices A__ = scope def lowercase_ ( self ): '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A__ = self.get_config() return config, input_ids, input_mask, token_labels def lowercase_ ( self ): '''simple docstring''' return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) def lowercase_ ( self ): '''simple docstring''' A__ , A__ , A__ , A__ = self.prepare_config_and_inputs() A__ = True return config, input_ids, input_mask, token_labels def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' A__ = GPTNeoXJapaneseModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ ) A__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' A__ = True A__ = GPTNeoXJapaneseModel(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' A__ = GPTNeoXJapaneseForCausalLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' A__ = True A__ = GPTNeoXJapaneseForCausalLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() # first forward pass A__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ ) A__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) A__ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and A__ = torch.cat([input_ids, next_tokens] , dim=-1 ) A__ = torch.cat([input_mask, next_mask] , dim=-1 ) A__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ ) A__ = output_from_no_past["hidden_states"][0] A__ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["hidden_states"][0] # select random slice A__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() A__ = output_from_no_past[:, -3:, random_slice_idx].detach() A__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) ) def lowercase_ ( self ): '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ , A__ = config_and_inputs A__ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): lowercase__ : Union[str, Any] = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () lowercase__ : Optional[int] = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () lowercase__ : Optional[Any] = ( {"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) lowercase__ : Any = False lowercase__ : str = False lowercase__ : Tuple = False lowercase__ : str = False def lowercase_ ( self ): '''simple docstring''' A__ = GPTNeoXJapaneseModelTester(self ) A__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def lowercase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self ): '''simple docstring''' A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowercase_ ( self ): '''simple docstring''' A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowercase_ ( self ): '''simple docstring''' A__ , A__ , A__ , A__ = 
self.model_tester.prepare_config_and_inputs_for_decoder() A__ = None self.model_tester.create_and_check_model_as_decoder(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowercase_ ( self ): '''simple docstring''' A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowercase_ ( self ): '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*UpperCamelCase__ ) @slow def lowercase_ ( self ): '''simple docstring''' A__ = "abeja/gpt-neox-japanese-2.7b" A__ = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"] A__ = [ "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。", "100年後に必要とされる会社は、「人」が中心の会社です。", "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。", "国境の長いトンネルを抜けると、そこは雪国だった。", "美味しい日本食といえば、やっぱりお寿司ですよね。", ] A__ = GPTNeoXJapaneseTokenizer.from_pretrained(UpperCamelCase__ ) A__ = GPTNeoXJapaneseForCausalLM.from_pretrained(UpperCamelCase__ ) A__ = [] for prompt in prompts: A__ = tokenizer(UpperCamelCase__ , return_tensors="pt" ).input_ids A__ = model.generate(UpperCamelCase__ , max_length=50 ) A__ = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) predicted_outputs += generated_string self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
337
1
from manim import *


class _UpperCamelCase ( _A ):
    '''simple docstring'''

    def lowerCAmelCase__ ( self : Union[str, Any] ):
        UpperCamelCase_: Tuple = Rectangle(height=0.5 , width=0.5 )
        UpperCamelCase_: Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        UpperCamelCase_: Union[str, Any] = [mem.copy() for i in range(6 )]
        UpperCamelCase_: Any = [mem.copy() for i in range(6 )]
        UpperCamelCase_: Any = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
        UpperCamelCase_: Any = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
        UpperCamelCase_: Optional[Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
        UpperCamelCase_: List[str] = Text("""CPU""" , font_size=24 )
        UpperCamelCase_: Dict = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(snake_case_ )
        UpperCamelCase_: Union[str, Any] = [mem.copy() for i in range(4 )]
        UpperCamelCase_: Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
        UpperCamelCase_: Optional[Any] = Text("""GPU""" , font_size=24 )
        UpperCamelCase_: Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
        gpu.move_to([-1, -1, 0] )
        self.add(snake_case_ )
        UpperCamelCase_: Optional[Any] = [mem.copy() for i in range(6 )]
        UpperCamelCase_: Union[str, Any] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
        UpperCamelCase_: List[Any] = Text("""Model""" , font_size=24 )
        UpperCamelCase_: Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
        model.move_to([3, -1.0, 0] )
        self.add(snake_case_ )
        UpperCamelCase_: str = []
        for i, rect in enumerate(snake_case_ ):
            rect.set_stroke(snake_case_ )
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            UpperCamelCase_: List[Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(cpu_targs[0] , direction=snake_case_ , buff=0.0 )
            else:
                cpu_target.next_to(cpu_targs[i - 1] , direction=snake_case_ , buff=0.0 )
            self.add(snake_case_ )
            cpu_targs.append(snake_case_ )
        UpperCamelCase_: Optional[Any] = [mem.copy() for i in range(6 )]
        UpperCamelCase_: Optional[int] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
        UpperCamelCase_: Optional[int] = Text("""Loaded Checkpoint""" , font_size=24 )
        UpperCamelCase_: int = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , aligned_edge=snake_case_ , buff=0.4 )
        checkpoint.move_to([3, 0.5, 0] )
        UpperCamelCase_: List[Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        UpperCamelCase_: int = MarkupText(
            f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(snake_case_ , snake_case_ )
        UpperCamelCase_: Optional[int] = MarkupText(
            f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
        blue_text.next_to(snake_case_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        UpperCamelCase_: int = MarkupText(
            f'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(snake_case_ ) , Write(snake_case_ ) )
        self.play(Write(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) )
        UpperCamelCase_: Any = []
        UpperCamelCase_: List[str] = []
        for i, rect in enumerate(snake_case_ ):
            UpperCamelCase_: int = fill.copy().set_fill(snake_case_ , opacity=0.7 )
            target.move_to(snake_case_ )
            first_animations.append(GrowFromCenter(snake_case_ , run_time=1 ) )
            UpperCamelCase_: int = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5] )
            second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
        self.play(*snake_case_ )
        self.play(*snake_case_ )
        self.wait()
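The sample above builds its layout from a handful of manim primitives (Rectangle, VGroup.arrange, Text). A minimal, hedged sketch of the same idea with readable names follows; the scene name and layout values are illustrative, not part of the sample.

# A minimal runnable sketch (assumes the `manim` community edition is
# installed); render with: manim -pql scene.py MemoryRow
from manim import DOWN, RIGHT, Rectangle, Scene, Text, VGroup


class MemoryRow(Scene):
    def construct(self):
        # Six memory cells arranged in a row, with a "CPU" label underneath.
        cells = VGroup(*[Rectangle(height=0.5, width=0.5) for _ in range(6)])
        cells.arrange(RIGHT, buff=0)
        label = Text("CPU", font_size=24).next_to(cells, DOWN, buff=0.3)
        self.add(cells, label)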
670
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


lowerCamelCase_ : Optional[int] = HUGGINGFACE_HUB_CACHE
lowerCamelCase_ : List[str] = """config.json"""
lowerCamelCase_ : Any = """diffusion_pytorch_model.bin"""
lowerCamelCase_ : Union[str, Any] = """diffusion_flax_model.msgpack"""
lowerCamelCase_ : Dict = """model.onnx"""
lowerCamelCase_ : List[Any] = """diffusion_pytorch_model.safetensors"""
lowerCamelCase_ : Optional[Any] = """weights.pb"""
lowerCamelCase_ : Optional[Any] = """https://huggingface.co"""
lowerCamelCase_ : Union[str, Any] = default_cache_path
lowerCamelCase_ : Tuple = """diffusers_modules"""
lowerCamelCase_ : Optional[Any] = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
lowerCamelCase_ : str = ["""fp16""", """non-ema"""]
lowerCamelCase_ : List[Any] = """.self_attn"""
670
1
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class __lowercase (__snake_case ):
    _UpperCamelCase = ["""image_processor""", """tokenizer"""]
    _UpperCamelCase = """Pix2StructImageProcessor"""
    _UpperCamelCase = ("""T5Tokenizer""", """T5TokenizerFast""")

    def __init__( self , A_ , A_ ) ->List[str]:
        '''simple docstring'''
        __lowerCAmelCase : Optional[int] = False
        super().__init__(snake_case_ , snake_case_ )

    def __call__( self , A_=None , A_ = None , A_ = True , A_ = False , A_ = None , A_ = None , A_ = 2048 , A_ = 0 , A_ = None , A_ = None , A_ = False , A_ = False , A_ = False , A_ = False , A_ = False , A_ = True , A_ = None , **A_ , ) ->List[str]:
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''' )
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            __lowerCAmelCase : Union[str, Any] = self.tokenizer
            __lowerCAmelCase : Any = self.tokenizer(
                text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            __lowerCAmelCase : List[str] = self.image_processor(
                snake_case_ , return_tensors=snake_case_ , max_patches=snake_case_ , **snake_case_ )
        else:
            # add pixel_values and bbox
            __lowerCAmelCase : Tuple = self.image_processor(
                snake_case_ , return_tensors=snake_case_ , max_patches=snake_case_ , header_text=snake_case_ , **snake_case_ )

        if text is not None and not self.image_processor.is_vqa:
            __lowerCAmelCase : Dict = self.tokenizer(
                text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
            if "attention_mask" in text_encoding:
                __lowerCAmelCase : Union[str, Any] = text_encoding.pop('''attention_mask''' )
            if "input_ids" in text_encoding:
                __lowerCAmelCase : Any = text_encoding.pop('''input_ids''' )
        else:
            __lowerCAmelCase : str = None
        if text_encoding is not None:
            encoding_image_processor.update(snake_case_ )
        return encoding_image_processor

    def UpperCamelCase__ ( self , *A_ , **A_ ) ->Optional[Any]:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )

    def UpperCamelCase__ ( self , *A_ , **A_ ) ->List[Any]:
        '''simple docstring'''
        return self.tokenizer.decode(*snake_case_ , **snake_case_ )

    @property
    def UpperCamelCase__ ( self ) ->str:
        '''simple docstring'''
        __lowerCAmelCase : int = self.tokenizer.model_input_names
        __lowerCAmelCase : Optional[int] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
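For orientation, here is a hedged usage sketch of the processor pattern above through the public transformers API; the checkpoint name is illustrative and assumes network access.

# Sketch only: `Pix2StructProcessor` combines the image processor and the T5
# tokenizer, as in the __call__ above. The checkpoint name is an assumption.
from PIL import Image
from transformers import Pix2StructProcessor

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
image = Image.new("RGB", (512, 512))
inputs = processor(images=image, text="A caption", return_tensors="pt", max_patches=2048)
print(sorted(inputs.keys()))  # flattened patches plus decoder-side text fields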
492
from __future__ import annotations

import unittest

from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import numpy
    import tensorflow as tf

    from transformers.models.esm.modeling_tf_esm import (
        TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFEsmForMaskedLM,
        TFEsmForSequenceClassification,
        TFEsmForTokenClassification,
        TFEsmModel,
    )


class lowerCamelCase:
    '''simple docstring'''

    def __init__( self , snake_case_ , ):
        _A = parent
        _A = 13
        _A = 7
        _A = True
        _A = True
        _A = True
        _A = 99
        _A = 32
        _A = 2
        _A = 4
        _A = 37
        _A = 'gelu'
        _A = 0.1
        _A = 0.1
        _A = 512
        _A = 16
        _A = 2
        _A = 0.02
        _A = 3
        _A = 4
        _A = None

    def lowerCAmelCase__ ( self ):
        _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _A = None
        if self.use_input_mask:
            _A = random_attention_mask([self.batch_size, self.seq_length] )
        _A = None
        _A = None
        _A = None
        if self.use_labels:
            _A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _A = ids_tensor([self.batch_size] , self.num_choices )
        _A = EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowerCAmelCase__ ( self ):
        ( ( _A ), ( _A ), ( _A ), ( _A ), ( _A ), ( _A ), ) = self.prepare_config_and_inputs()
        _A = True
        _A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        _A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        _A = TFEsmModel(config=snake_case_ )
        _A = {'input_ids': input_ids, 'attention_mask': input_mask}
        _A = model(snake_case_ )
        _A = [input_ids, input_mask]
        _A = model(snake_case_ )
        _A = model(snake_case_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
        _A = True
        _A = TFEsmModel(config=snake_case_ )
        _A = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'encoder_hidden_states': encoder_hidden_states,
            'encoder_attention_mask': encoder_attention_mask,
        }
        _A = model(snake_case_ )
        _A = [input_ids, input_mask]
        _A = model(snake_case_ , encoder_hidden_states=snake_case_ )
        # Also check the case where encoder outputs are not passed
        _A = model(snake_case_ , attention_mask=snake_case_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        _A = TFEsmForMaskedLM(config=snake_case_ )
        _A = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        _A = self.num_labels
        _A = TFEsmForTokenClassification(config=snake_case_ )
        _A = {'input_ids': input_ids, 'attention_mask': input_mask}
        _A = model(snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def lowerCAmelCase__ ( self ):
        _A = self.prepare_config_and_inputs()
        ( ( _A ), ( _A ), ( _A ), ( _A ), ( _A ), ( _A ), ) = config_and_inputs
        _A = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_tf
class lowerCamelCase( __snake_case , __snake_case , unittest.TestCase ):
    '''simple docstring'''

    __magic_name__ = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    __magic_name__ = (
        {
            'feature-extraction': TFEsmModel,
            'fill-mask': TFEsmForMaskedLM,
            'text-classification': TFEsmForSequenceClassification,
            'token-classification': TFEsmForTokenClassification,
            'zero-shot': TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    __magic_name__ = False
    __magic_name__ = False

    def lowerCAmelCase__ ( self ):
        _A = TFEsmModelTester(self )
        _A = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )

    def lowerCAmelCase__ ( self ):
        self.config_tester.run_common_tests()

    def lowerCAmelCase__ ( self ):
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case_ )

    def lowerCAmelCase__ ( self ):
        _A = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*snake_case_ )

    def lowerCAmelCase__ ( self ):
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*snake_case_ )

    def lowerCAmelCase__ ( self ):
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*snake_case_ )

    @slow
    def lowerCAmelCase__ ( self ):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _A = TFEsmModel.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )

    @unittest.skip('Protein models do not support embedding resizing.' )
    def lowerCAmelCase__ ( self ):
        pass

    @unittest.skip('Protein models do not support embedding resizing.' )
    def lowerCAmelCase__ ( self ):
        pass

    def lowerCAmelCase__ ( self ):
        _A, _A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _A = model_class(snake_case_ )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                _A = model.get_bias()
                assert isinstance(snake_case_ , snake_case_ )
                for k, v in name.items():
                    assert isinstance(snake_case_ , tf.Variable )
            else:
                _A = model.get_output_embeddings()
                assert x is None
                _A = model.get_bias()
                assert name is None


@require_tf
class lowerCamelCase( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def lowerCAmelCase__ ( self ):
        _A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
        _A = tf.constant([[0, 1, 2, 3, 4, 5]] )
        _A = model(snake_case_ )[0]
        _A = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) , snake_case_ )
        # compare the actual values for a slice.
        _A = tf.constant(
            [
                [
                    [8.92_1518, -10.58_9814, -6.467_1307],
                    [-6.396_7156, -13.91_1377, -1.121_1915],
                    [-7.78_1247, -13.95_1557, -3.74_0592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )

    @slow
    def lowerCAmelCase__ ( self ):
        _A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
        _A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        _A = model(snake_case_ )[0]
        # compare the actual values for a slice.
        _A = tf.constant(
            [
                [
                    [0.1444_3092, 0.5412_5327, 0.324_7739],
                    [0.3034_0484, 0.0052_6676, 0.3107_7722],
                    [0.3227_8043, -0.2498_7096, 0.341_4628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
27
0
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
#     ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#

import argparse
import os
import re
import zipfile

import torch

from transformers import AutoTokenizer, GPTaConfig


def UpperCAmelCase ( A__ , A__ , A__=0 ) -> Optional[Any]:
    # Format the message.
    if name is None:
        _snake_case : Any = None
    else:
        _snake_case : Tuple = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
        _snake_case : Dict = fmt.format(A__ )

    # Print and recurse (if needed).
    if isinstance(A__ , A__ ):
        if msg is not None:
            print(A__ )
        for k in val.keys():
            recursive_print(A__ , val[k] , spaces + 2 )
    elif isinstance(A__ , torch.Tensor ):
        print(A__ , """:""" , val.size() )
    else:
        print(A__ , """:""" , A__ )


def UpperCAmelCase ( A__ , A__ , A__ , A__ , A__ ) -> Union[str, Any]:
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    _snake_case : Any = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        _snake_case : List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:]
        _snake_case : Union[str, Any] = param.view(*A__ )
        _snake_case : Tuple = param.transpose(0 , 2 )
        _snake_case : Tuple = param.transpose(1 , 2 ).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        _snake_case : str = (num_heads, num_splits, hidden_size) + input_shape[1:]
        _snake_case : Union[str, Any] = param.view(*A__ )
        _snake_case : int = param.transpose(0 , 1 ).contiguous()
    _snake_case : List[Any] = param.view(*A__ )
    return param


def UpperCAmelCase ( A__ , A__ , A__ ) -> Tuple:
    # The converted output model.
    _snake_case : Union[str, Any] = {}

    # old versions did not store training args
    _snake_case : str = input_state_dict.get("""args""" , A__ )
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        _snake_case : List[Any] = ds_args.padded_vocab_size
        _snake_case : Any = ds_args.max_position_embeddings
        _snake_case : List[Any] = ds_args.hidden_size
        _snake_case : List[str] = ds_args.num_layers
        _snake_case : Tuple = ds_args.num_attention_heads
        _snake_case : int = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    _snake_case : Union[str, Any] = config.n_head
    # The hidden_size per head.
    _snake_case : Tuple = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        _snake_case : Tuple = input_state_dict["""checkpoint_version"""]
    else:
        _snake_case : Dict = 0.0

    # The model.
    _snake_case : Tuple = input_state_dict["""model"""]
    # The language model.
    _snake_case : str = model["""language_model"""]
    # The embeddings.
    _snake_case : List[str] = lm["""embedding"""]

    # The word embeddings.
    _snake_case : Optional[int] = embeddings["""word_embeddings"""]["""weight"""]
    # Truncate the embedding table to vocab_size rows.
    _snake_case : List[Any] = word_embeddings[: config.vocab_size, :]
    _snake_case : str = word_embeddings

    # The position embeddings.
    _snake_case : Tuple = embeddings["""position_embeddings"""]["""weight"""]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    _snake_case : str = pos_embeddings.size(0 )
    if n_positions != config.n_positions:
        raise ValueError(
            f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
    # Store the position embeddings.
    _snake_case : Union[str, Any] = pos_embeddings

    # The transformer.
    _snake_case : List[str] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]

    # The regex to extract layer names.
    _snake_case : List[str] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )

    # The simple map of names for "automated" rules.
    _snake_case : Union[str, Any] = {
        """attention.dense""": """.attn.c_proj.""",
        """self_attention.dense""": """.attn.c_proj.""",
        """mlp.dense_h_to_4h""": """.mlp.c_fc.""",
        """mlp.dense_4h_to_h""": """.mlp.c_proj.""",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        _snake_case : List[Any] = layer_re.match(A__ )

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        _snake_case : str = int(m.group(1 ) )
        # The name of the operation.
        _snake_case : List[str] = m.group(2 )
        # Is it a weight or a bias?
        _snake_case : Optional[int] = m.group(3 )

        # The name of the layer.
        _snake_case : Optional[int] = f'''transformer.h.{layer_idx}'''

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("""layernorm""" ):
            _snake_case : Optional[int] = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
            _snake_case : Any = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            _snake_case : Any = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
                1 , 1 , A__ , A__ )
            _snake_case : Optional[int] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            _snake_case : Any = torch.tensor(-1E4 , dtype=torch.floataa )
            _snake_case : int = masked_bias

            _snake_case : int = fix_query_key_value_ordering(A__ , A__ , 3 , A__ , A__ )
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            _snake_case : int = out_val.transpose(0 , 1 ).contiguous()
            # Store.
            _snake_case : Union[str, Any] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            _snake_case : Tuple = fix_query_key_value_ordering(A__ , A__ , 3 , A__ , A__ )
            # Store. No change of shape.
            _snake_case : List[Any] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            _snake_case : List[str] = megatron_to_transformers[op_name]
            _snake_case : Optional[Any] = val.transpose(0 , 1 )

        # Copy the bias.
        elif weight_or_bias == "bias":
            _snake_case : Optional[int] = megatron_to_transformers[op_name]
            _snake_case : str = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    _snake_case : List[Any] = transformer["""final_layernorm.weight"""]
    _snake_case : Any = transformer["""final_layernorm.bias"""]

    # For LM head, transformers' wants the matrix to weight embeddings.
    _snake_case : str = word_embeddings

    # It should be done!
    return output_state_dict


def UpperCAmelCase ( ) -> Union[str, Any]:
    # Create the argument parser.
    _snake_case : int = argparse.ArgumentParser()
    parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
    parser.add_argument(
        """path_to_checkpoint""" , type=A__ , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
    parser.add_argument(
        """--config_file""" , default="""""" , type=A__ , help="""An optional config json file describing the pre-trained model.""" , )
    _snake_case : Optional[Any] = parser.parse_args()

    # Extract the basename.
    _snake_case : Any = os.path.dirname(args.path_to_checkpoint )

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
    if args.path_to_checkpoint.endswith(""".zip""" ):
        with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
            with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
                _snake_case : str = torch.load(A__ , map_location="""cpu""" )
    else:
        _snake_case : int = torch.load(args.path_to_checkpoint , map_location="""cpu""" )

    _snake_case : Optional[int] = input_state_dict.get("""args""" , A__ )

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                _snake_case : Optional[int] = """gelu_fast"""
            elif ds_args.openai_gelu:
                _snake_case : Optional[int] = """gelu_new"""
            else:
                _snake_case : Optional[int] = """gelu"""
        else:
            # in the very early days this used to be "gelu_new"
            _snake_case : Any = """gelu_new"""

        # Spell out all parameters in case the defaults change.
        _snake_case : int = GPTaConfig(
            vocab_size=5_02_57 , n_positions=10_24 , n_embd=10_24 , n_layer=24 , n_head=16 , n_inner=40_96 , activation_function=A__ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=A__ , summary_activation=A__ , summary_proj_to_labels=A__ , summary_first_dropout=0.1 , scale_attn_weights=A__ , use_cache=A__ , bos_token_id=5_02_56 , eos_token_id=5_02_56 , )
    else:
        _snake_case : Optional[Any] = GPTaConfig.from_json_file(args.config_file )

    _snake_case : int = ["""GPT2LMHeadModel"""]

    # Convert.
    print("""Converting""" )
    _snake_case : List[Any] = convert_megatron_checkpoint(A__ , A__ , A__ )

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(A__ , A__ )

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        _snake_case : Tuple = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            _snake_case : Any = """gpt2"""
        elif tokenizer_type == "PretrainedFromHF":
            _snake_case : Optional[Any] = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
    else:
        _snake_case : List[str] = """gpt2"""

    _snake_case : int = AutoTokenizer.from_pretrained(A__ )
    _snake_case : Tuple = type(A__ ).__name__
    _snake_case : Any = tokenizer_class

    # Store the config to file.
    print("""Saving config""" )
    config.save_pretrained(A__ )

    # Save tokenizer based on args
    print(f'''Adding {tokenizer_class} tokenizer files''' )
    tokenizer.save_pretrained(A__ )

    # Store the state_dict to file.
    _snake_case : Tuple = os.path.join(A__ , """pytorch_model.bin""" )
    print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
    torch.save(A__ , A__ )


####################################################################################################

if __name__ == "__main__":
    main()

####################################################################################################
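The QKV reordering in `fix_query_key_value_ordering` is the subtle step of this conversion. Below is a self-contained sketch of the checkpoint_version >= 2.0 branch with tiny made-up shapes, for illustration only.

# Megatron >= 2.0 stores rows as [num_heads * num_splits * hidden, :]; the
# converted layout wants [num_splits * num_heads * hidden, :]. Same values,
# permuted row blocks. The shapes below are toy values, not real model sizes.
import torch

num_heads, num_splits, head_dim, cols = 2, 3, 4, 5
param = torch.arange(num_heads * num_splits * head_dim * cols, dtype=torch.float32)
param = param.view(num_heads * num_splits * head_dim, cols)

input_shape = param.size()
reordered = (
    param.view(num_heads, num_splits, head_dim, cols)
    .transpose(0, 1)
    .contiguous()
    .view(*input_shape)
)
print(reordered.shape)  # torch.Size([24, 5]): shape unchanged, rows permuted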
700
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class __SCREAMING_SNAKE_CASE ( lowercase__ ):
    """simple docstring"""

    def __lowerCamelCase( self ):
        """simple docstring"""
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def __lowerCamelCase( self ):
        """simple docstring"""
        _snake_case : Optional[Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
        return Dataset.from_dict(SCREAMING_SNAKE_CASE__ )

    def __lowerCamelCase( self ):
        """simple docstring"""
        _snake_case : Optional[Any] = self._create_example_records()
        _snake_case : Optional[int] = Dataset.from_list(SCREAMING_SNAKE_CASE__ )
        self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
        for i, r in enumerate(SCREAMING_SNAKE_CASE__ ):
            self.assertDictEqual(SCREAMING_SNAKE_CASE__ , example_records[i] )

    def __lowerCamelCase( self ):
        """simple docstring"""
        _snake_case : Tuple = self._create_example_records()
        _snake_case : Optional[Any] = Dataset.from_list(SCREAMING_SNAKE_CASE__ )
        _snake_case : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
        self.assertEqual(dset.info , dset_from_dict.info )

    def __lowerCamelCase( self ):  # checks what happens with missing columns
        """simple docstring"""
        _snake_case : List[str] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
        _snake_case : List[str] = Dataset.from_list(SCREAMING_SNAKE_CASE__ )
        self.assertDictEqual(dset[0] , {"""col_1""": 1} )
        self.assertDictEqual(dset[1] , {"""col_1""": None} )  # NB: first record is used for columns

    def __lowerCamelCase( self ):  # checks if the type can be inferred from the second record
        """simple docstring"""
        _snake_case : List[str] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
        _snake_case : str = Dataset.from_list(SCREAMING_SNAKE_CASE__ )
        self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )

    def __lowerCamelCase( self ):
        """simple docstring"""
        _snake_case : Optional[Any] = Dataset.from_list([] )
        self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 0 )
        self.assertListEqual(dset.column_names , [] )
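A quick, hedged illustration of the `Dataset.from_list` behaviour those tests assert: the first record fixes the column set, and missing values become None.

from datasets import Dataset

dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
print(dset.column_names)  # ['col_1'] -- columns come from the first record
print(dset[1])            # {'col_1': None}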
519
0
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
    """simple docstring"""

    a_ = BioGptTokenizer
    a_ = False

    def lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __lowerCAmelCase = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            'w</w>',
            'r</w>',
            't</w>',
            'lo',
            'low',
            'er</w>',
            'low</w>',
            'lowest</w>',
            'newer</w>',
            'wider</w>',
            '<unk>',
        ]
        __lowerCAmelCase = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
        __lowerCAmelCase = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']

        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' ) as fp:
            fp.write(json.dumps(lowerCAmelCase_ ) )
        with open(self.merges_file , 'w' ) as fp:
            fp.write('\n'.join(lowerCAmelCase_ ) )

    def lowercase ( self : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> int:
        __lowerCAmelCase = 'lower newer'
        __lowerCAmelCase = 'lower newer'
        return input_text, output_text

    def lowercase ( self : Optional[Any] ) -> int:
        __lowerCAmelCase = BioGptTokenizer(self.vocab_file , self.merges_file )
        __lowerCAmelCase = 'lower'
        __lowerCAmelCase = ['low', 'er</w>']
        __lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase_ )
        self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )

        __lowerCAmelCase = tokens + ['<unk>']
        __lowerCAmelCase = [1_4, 1_5, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )

    @slow
    def lowercase ( self : int ) -> Union[str, Any]:
        __lowerCAmelCase = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
        __lowerCAmelCase = tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase_ )
        __lowerCAmelCase = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase_ )
        __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
        __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
        self.assertTrue(encoded_sentence == [2] + text )
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
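The fixture above encodes a toy BPE table. A minimal sketch of the merge loop it exercises: 'lower' splits into characters, then merges apply in rank order, giving ['low', 'er</w>']. The helper below is illustrative, not the library's implementation.

# Toy merge ranks mirroring the merges file above: lower rank = merge earlier.
merges = {("l", "o"): 0, ("lo", "w"): 1, ("e", "r</w>"): 2}

def toy_bpe(word):
    # End-of-word marker on the last character, as in subword-nmt style BPE.
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while True:
        ranked = [(merges.get(pair, float("inf")), i)
                  for i, pair in enumerate(zip(symbols, symbols[1:]))]
        rank, i = min(ranked, default=(float("inf"), -1))
        if rank == float("inf"):  # no mergeable pair left
            return symbols
        symbols = symbols[:i] + [symbols[i] + symbols[i + 1]] + symbols[i + 2:]

print(toy_bpe("lower"))  # ['low', 'er</w>']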
53
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""", } class __magic_name__ ( lowercase__ , lowercase__ ): _SCREAMING_SNAKE_CASE : Dict = 'resnet' _SCREAMING_SNAKE_CASE : Optional[Any] = ['basic', 'bottleneck'] def __init__( self : Any , snake_case_ : Optional[int]=3 , snake_case_ : List[Any]=64 , snake_case_ : Union[str, Any]=[256, 512, 1024, 2048] , snake_case_ : Union[str, Any]=[3, 4, 6, 3] , snake_case_ : str="bottleneck" , snake_case_ : Tuple="relu" , snake_case_ : Union[str, Any]=False , snake_case_ : Union[str, Any]=None , snake_case_ : Optional[int]=None , **snake_case_ : Any , ): super().__init__(**snake_case_ ) if layer_type not in self.layer_types: raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' ) __snake_case = num_channels __snake_case = embedding_size __snake_case = hidden_sizes __snake_case = depths __snake_case = layer_type __snake_case = hidden_act __snake_case = downsample_in_first_stage __snake_case = ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(snake_case_ ) + 1 )] __snake_case , __snake_case = get_aligned_output_features_output_indices( out_features=snake_case_ , out_indices=snake_case_ , stage_names=self.stage_names ) class __magic_name__ ( lowercase__ ): _SCREAMING_SNAKE_CASE : Dict = version.parse('1.11' ) @property def lowerCAmelCase ( self : Optional[Any] ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def lowerCAmelCase ( self : str ): return 1e-3
163
0
import math


def _snake_case ( __snake_case ) -> str:
    '''simple docstring'''
    UpperCAmelCase_ : int = 0
    UpperCAmelCase_ : int = 0
    while num > 0:
        UpperCAmelCase_ : Tuple = num % 8
        UpperCAmelCase_ : Optional[Any] = octal + (remainder * math.floor(math.pow(1_0 , __snake_case ) ))
        counter += 1
        UpperCAmelCase_ : Any = math.floor(num / 8 )  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return F"""0o{int(__snake_case )}"""


def _snake_case ( ) -> None:
    '''simple docstring'''
    print("\n2 in octal is:" )
    print(decimal_to_octal(2 ) )  # = 2
    print("\n8 in octal is:" )
    print(decimal_to_octal(8 ) )  # = 10
    print("\n65 in octal is:" )
    print(decimal_to_octal(6_5 ) )  # = 101
    print("\n216 in octal is:" )
    print(decimal_to_octal(2_1_6 ) )  # = 330
    print("\n512 in octal is:" )
    print(decimal_to_octal(5_1_2 ) )  # = 1000
    print("\n" )


if __name__ == "__main__":
    main()
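A compact cross-check of the algorithm above against Python's built-in oct(); the helper name is illustrative.

def to_octal(num: int) -> str:
    # Repeatedly take num % 8 as the next (least significant) octal digit.
    digits = []
    while num > 0:
        digits.append(str(num % 8))
        num //= 8
    return "0o" + "".join(reversed(digits)) if digits else "0o0"

for n in (2, 8, 65, 216, 512):
    assert to_octal(n) == oct(n)  # e.g. oct(65) == '0o101'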
455
import argparse
from typing import List

import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset

# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


__lowerCamelCase = 16
__lowerCamelCase = 32


def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = 1_6 ) -> Tuple:
    '''simple docstring'''
    UpperCAmelCase_ : int = AutoTokenizer.from_pretrained("bert-base-cased" )
    UpperCAmelCase_ : List[Any] = DatasetDict(
        {
            "train": dataset["train"].select(__snake_case ),
            "validation": dataset["train"].select(__snake_case ),
            "test": dataset["validation"],
        } )

    def tokenize_function(__snake_case ):
        # max_length=None => use the model max length (it's actually the default)
        UpperCAmelCase_ : List[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__snake_case , max_length=__snake_case )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        UpperCAmelCase_ : int = datasets.map(
            __snake_case , batched=__snake_case , remove_columns=["idx", "sentence1", "sentence2"] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    UpperCAmelCase_ : Tuple = tokenized_datasets.rename_column("label" , "labels" )

    def collate_fn(__snake_case ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        UpperCAmelCase_ : Dict = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            UpperCAmelCase_ : Optional[Any] = 1_6
        elif accelerator.mixed_precision != "no":
            UpperCAmelCase_ : str = 8
        else:
            UpperCAmelCase_ : Dict = None

        return tokenizer.pad(
            __snake_case , padding="longest" , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors="pt" , )

    # Instantiate dataloaders.
    UpperCAmelCase_ : List[str] = DataLoader(
        tokenized_datasets["train"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
    UpperCAmelCase_ : Union[str, Any] = DataLoader(
        tokenized_datasets["validation"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
    UpperCAmelCase_ : str = DataLoader(
        tokenized_datasets["test"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )

    return train_dataloader, eval_dataloader, test_dataloader


def _snake_case ( __snake_case , __snake_case ) -> List[str]:
    '''simple docstring'''
    UpperCAmelCase_ : Tuple = []
    # Download the dataset
    UpperCAmelCase_ : Optional[int] = load_dataset("glue" , "mrpc" )
    # Create our splits
    UpperCAmelCase_ : Optional[Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    UpperCAmelCase_ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    UpperCAmelCase_ : Optional[int] = config["lr"]
    UpperCAmelCase_ : Dict = int(config["num_epochs"] )
    UpperCAmelCase_ : Union[str, Any] = int(config["seed"] )
    UpperCAmelCase_ : Optional[Any] = int(config["batch_size"] )
    UpperCAmelCase_ : Optional[Any] = evaluate.load("glue" , "mrpc" )

    # If the batch size is too big we use gradient accumulation
    UpperCAmelCase_ : Optional[int] = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        UpperCAmelCase_ : int = batch_size // MAX_GPU_BATCH_SIZE
        UpperCAmelCase_ : List[str] = MAX_GPU_BATCH_SIZE

    set_seed(__snake_case )

    # New Code #
    # Create our folds:
    UpperCAmelCase_ : int = kfold.split(np.zeros(datasets["train"].num_rows ) , datasets["train"]["label"] )
    UpperCAmelCase_ : Dict = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(__snake_case ):
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = get_fold_dataloaders(
            __snake_case , __snake_case , __snake_case , __snake_case , )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        UpperCAmelCase_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=__snake_case )

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        UpperCAmelCase_ : Union[str, Any] = model.to(accelerator.device )

        # Instantiate optimizer
        UpperCAmelCase_ : Tuple = AdamW(params=model.parameters() , lr=__snake_case )

        # Instantiate scheduler
        UpperCAmelCase_ : str = get_linear_schedule_with_warmup(
            optimizer=__snake_case , num_warmup_steps=1_0_0 , num_training_steps=(len(__snake_case ) * num_epochs) // gradient_accumulation_steps , )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(
            __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )

        # Now we train the model
        for epoch in range(__snake_case ):
            model.train()
            for step, batch in enumerate(__snake_case ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                UpperCAmelCase_ : Optional[Any] = model(**__snake_case )
                UpperCAmelCase_ : Dict = outputs.loss
                UpperCAmelCase_ : List[Any] = loss / gradient_accumulation_steps
                accelerator.backward(__snake_case )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(__snake_case ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    UpperCAmelCase_ : str = model(**__snake_case )
                UpperCAmelCase_ : Optional[int] = outputs.logits.argmax(dim=-1 )
                UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = accelerator.gather_for_metrics((predictions, batch["labels"]) )
                metric.add_batch(
                    predictions=__snake_case , references=__snake_case , )

            UpperCAmelCase_ : Optional[Any] = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"""epoch {epoch}:""" , __snake_case )

        # New Code #
        # We also run predictions on the test set at the very end
        UpperCAmelCase_ : Union[str, Any] = []
        for step, batch in enumerate(__snake_case ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                UpperCAmelCase_ : Tuple = model(**__snake_case )
            UpperCAmelCase_ : Optional[int] = outputs.logits
            UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = accelerator.gather_for_metrics((predictions, batch["labels"]) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(__snake_case , dim=0 ) )

    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    UpperCAmelCase_ : Union[str, Any] = torch.cat(__snake_case , dim=0 )
    UpperCAmelCase_ : str = torch.stack(__snake_case , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    UpperCAmelCase_ : List[str] = metric.compute(predictions=__snake_case , references=__snake_case )
    accelerator.print("Average test metrics from all folds:" , __snake_case )


def _snake_case ( ) -> str:
    '''simple docstring'''
    UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" ,
        type=__snake_case ,
        default=__snake_case ,
        choices=["no", "fp16", "bf16", "fp8"] ,
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." ,
    )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    # New Code #
    parser.add_argument("--num_folds" , type=__snake_case , default=3 , help="The number of splits to perform across the dataset" )
    UpperCAmelCase_ : Any = parser.parse_args()
    UpperCAmelCase_ : List[Any] = {"lr": 2E-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
    training_function(__snake_case , __snake_case )


if __name__ == "__main__":
    main()
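The fold construction above relies on StratifiedKFold preserving the label distribution per fold; a tiny, hedged standalone check:

import numpy as np
from sklearn.model_selection import StratifiedKFold

labels = np.array([0, 0, 0, 0, 1, 1, 1, 1])
kfold = StratifiedKFold(n_splits=2)
for train_idx, valid_idx in kfold.split(np.zeros(len(labels)), labels):
    # Each validation fold keeps roughly the same share of positive labels.
    print(valid_idx, labels[valid_idx].mean())  # mean stays at 0.5 here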
455
1
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


__UpperCAmelCase = {
    """configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
    """tokenization_perceiver""": ["""PerceiverTokenizer"""],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = ["""PerceiverFeatureExtractor"""]
    __UpperCAmelCase = ["""PerceiverImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = [
        """PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """PerceiverForImageClassificationConvProcessing""",
        """PerceiverForImageClassificationFourier""",
        """PerceiverForImageClassificationLearned""",
        """PerceiverForMaskedLM""",
        """PerceiverForMultimodalAutoencoding""",
        """PerceiverForOpticalFlow""",
        """PerceiverForSequenceClassification""",
        """PerceiverLayer""",
        """PerceiverModel""",
        """PerceiverPreTrainedModel""",
    ]


if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    __UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
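The module above defers its heavy imports through `_LazyModule`. A minimal sketch of that pattern follows; the class below is illustrative, not the transformers implementation.

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Called only when normal lookup fails: import lazily on first access.
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)


# e.g. LazyModule("pkg", {"pkg.configuration": ["Config"]}).Config imports
# pkg.configuration only at attribute-access time.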
379
'''simple docstring'''

from typing import Any, Callable, Dict, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker


__UpperCAmelCase = """CompVis/stable-diffusion-v1-1"""
__UpperCAmelCase = """CompVis/stable-diffusion-v1-2"""
__UpperCAmelCase = """CompVis/stable-diffusion-v1-3"""
__UpperCAmelCase = """CompVis/stable-diffusion-v1-4"""


class UpperCamelCase__ ( lowercase_ ):
    """simple docstring"""

    def __init__( self : List[str] , lowerCamelCase_ : AutoencoderKL , lowerCamelCase_ : CLIPTextModel , lowerCamelCase_ : CLIPTokenizer , lowerCamelCase_ : UNetaDConditionModel , lowerCamelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase_ : StableDiffusionSafetyChecker , lowerCamelCase_ : CLIPImageProcessor , lowerCamelCase_ : bool = True , ):
        '''simple docstring'''
        super()._init_()
        SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionPipeline(
            vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , requires_safety_checker=lowerCamelCase_ , )
        self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )

    @property
    def lowerCamelCase_ ( self : int ):
        '''simple docstring'''
        return {k: getattr(self , lowerCamelCase_ ) for k in self.config.keys() if not k.startswith("""_""" )}

    def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[Union[str, int]] = "auto" ):
        '''simple docstring'''
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            SCREAMING_SNAKE_CASE : Optional[int] = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(lowerCamelCase_ )

    def lowerCamelCase_ ( self : Dict ):
        '''simple docstring'''
        self.enable_attention_slicing(lowerCamelCase_ )

    @torch.no_grad()
    def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Union[str, List[str]] , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 50 , lowerCamelCase_ : float = 7.5 , lowerCamelCase_ : Optional[Union[str, List[str]]] = None , lowerCamelCase_ : Optional[int] = 1 , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : Optional[torch.Generator] = None , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ : int = 1 , **lowerCamelCase_ : Union[str, Any] , ):
        '''simple docstring'''
        return self.pipea(
            prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )

    @torch.no_grad()
    def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Union[str, List[str]] , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 50 , lowerCamelCase_ : float = 7.5 , lowerCamelCase_ : Optional[Union[str, List[str]]] = None , lowerCamelCase_ : Optional[int] = 1 , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : Optional[torch.Generator] = None , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ : int = 1 , **lowerCamelCase_ : Union[str, Any] , ):
        '''simple docstring'''
        return self.pipea(
            prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )

    @torch.no_grad()
    def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Union[str, List[str]] , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 50 , lowerCamelCase_ : float = 7.5 , lowerCamelCase_ : Optional[Union[str, List[str]]] = None , lowerCamelCase_ : Optional[int] = 1 , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : Optional[torch.Generator] = None , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ : int = 1 , **lowerCamelCase_ : str , ):
        '''simple docstring'''
        return self.pipea(
            prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )

    @torch.no_grad()
    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Union[str, List[str]] , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 50 , lowerCamelCase_ : float = 7.5 , lowerCamelCase_ : Optional[Union[str, List[str]]] = None , lowerCamelCase_ : Optional[int] = 1 , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : Optional[torch.Generator] = None , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ : int = 1 , **lowerCamelCase_ : Dict , ):
        '''simple docstring'''
        return self.pipea(
            prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )

    @torch.no_grad()
    def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, List[str]] , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 50 , lowerCamelCase_ : float = 7.5 , lowerCamelCase_ : Optional[Union[str, List[str]]] = None , lowerCamelCase_ : Optional[int] = 1 , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : Optional[torch.Generator] = None , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ : int = 1 , **lowerCamelCase_ : Any , ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : int = """cuda""" if torch.cuda.is_available() else """cpu"""
        self.to(lowerCamelCase_ )

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )

        # Get first result from Stable Diffusion Checkpoint v1.1
        SCREAMING_SNAKE_CASE : List[str] = self.textaimg_sda_a(
            prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )

        # Get first result from Stable Diffusion Checkpoint v1.2
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.textaimg_sda_a(
            prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )

        # Get first result from Stable Diffusion Checkpoint v1.3
        SCREAMING_SNAKE_CASE : int = self.textaimg_sda_a(
            prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )

        # Get first result from Stable Diffusion Checkpoint v1.4
        SCREAMING_SNAKE_CASE : Optional[int] = self.textaimg_sda_a(
            prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
379
1
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase : List[Any] = logging.get_logger(__name__)

lowercase : str = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class __lowercase ( snake_case__ ):
    """simple docstring"""

    UpperCAmelCase_ : Optional[int] = '''xlnet'''
    UpperCAmelCase_ : Optional[int] = ['''mems''']
    UpperCAmelCase_ : Optional[int] = {
        '''n_token''': '''vocab_size''',  # Backward compatibility
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__( self , __UpperCAmelCase=3_20_00 , __UpperCAmelCase=10_24 , __UpperCAmelCase=24 , __UpperCAmelCase=16 , __UpperCAmelCase=40_96 , __UpperCAmelCase="gelu" , __UpperCAmelCase=True , __UpperCAmelCase="bi" , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=-1 , __UpperCAmelCase=False , __UpperCAmelCase="last" , __UpperCAmelCase=True , __UpperCAmelCase="tanh" , __UpperCAmelCase=0.1 , __UpperCAmelCase=5 , __UpperCAmelCase=5 , __UpperCAmelCase=5 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , **__UpperCAmelCase , ) -> Any:
        A : Any = vocab_size
        A : Any = d_model
        A : Tuple = n_layer
        A : Dict = n_head
        if d_model % n_head != 0:
            raise ValueError(f'\'d_model % n_head\' ({d_model % n_head}) should be equal to 0' )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f'`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})' )
        A : Any = d_model // n_head
        A : Union[str, Any] = ff_activation
        A : Dict = d_inner
        A : Optional[int] = untie_r
        A : Any = attn_type
        A : Tuple = initializer_range
        A : List[Any] = layer_norm_eps
        A : str = dropout
        A : Tuple = mem_len
        A : Optional[Any] = reuse_len
        A : Any = bi_data
        A : Optional[int] = clamp_len
        A : int = same_length
        A : int = summary_type
        A : Tuple = summary_use_proj
        A : Dict = summary_activation
        A : Optional[int] = summary_last_dropout
        A : str = start_n_top
        A : List[str] = end_n_top
        A : Union[str, Any] = bos_token_id
        A : List[Any] = pad_token_id
        A : Any = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'''
                ''' instead.''' ,
                lowercase_ ,
            )
            A : Optional[int] = kwargs["use_cache"]
        A : Dict = use_mems_eval
        A : Optional[int] = use_mems_train
        super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )

    @property
    def snake_case ( self ) -> str:
        logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
        return -1

    @max_position_embeddings.setter
    def snake_case ( self , __UpperCAmelCase ) -> List[str]:
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
704
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class __lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = TextToVideoSDPipeline UpperCAmelCase_ : str = TEXT_TO_IMAGE_PARAMS UpperCAmelCase_ : Dict = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. UpperCAmelCase_ : Dict = frozenset( [ '''num_inference_steps''', '''generator''', '''latents''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def snake_case ( self ) -> int: torch.manual_seed(0 ) A : Any = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , ) A : int = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) torch.manual_seed(0 ) A : List[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) A : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , ) A : int = CLIPTextModel(__UpperCAmelCase ) A : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) A : Optional[Any] = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ) -> List[Any]: if str(__UpperCAmelCase ).startswith('''mps''' ): A : List[str] = torch.manual_seed(__UpperCAmelCase ) else: A : Optional[Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) A : Optional[Any] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''pt''', } return inputs def snake_case ( self ) -> List[str]: A : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator A : Dict = self.get_dummy_components() A : Any = TextToVideoSDPipeline(**__UpperCAmelCase ) A : Union[str, Any] = sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) A : Optional[int] = self.get_dummy_inputs(__UpperCAmelCase ) A : Optional[int] = '''np''' A : Dict = sd_pipe(**__UpperCAmelCase ).frames A : Tuple = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 
64, 3) A : Any = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def snake_case ( self ) -> Dict: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__UpperCAmelCase , expected_max_diff=3E-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def snake_case ( self ) -> str: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__UpperCAmelCase , expected_max_diff=1E-2 ) @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def snake_case ( self ) -> Any: pass @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def snake_case ( self ) -> Tuple: pass @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' ) def snake_case ( self ) -> List[str]: pass def snake_case ( self ) -> Dict: return super().test_progress_bar() @slow @skip_mps class __lowercase ( unittest.TestCase ): """simple docstring""" def snake_case ( self ) -> int: A : List[str] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' ) A : List[Any] = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' ) A : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) A : Optional[int] = pipe.to('''cuda''' ) A : List[Any] = '''Spiderman is surfing''' A : Any = torch.Generator(device='''cpu''' ).manual_seed(0 ) A : List[Any] = pipe(__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=25 , output_type='''pt''' ).frames A : Optional[int] = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2 def snake_case ( self ) -> Union[str, Any]: A : Union[str, Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' ) A : Tuple = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' ) A : Any = pipe.to('''cuda''' ) A : int = '''Spiderman is surfing''' A : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 ) A : List[Any] = pipe(__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type='''pt''' ).frames A : List[str] = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2
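# Sketch of the inference path the slow tests above exercise; the checkpoint id
# and the sampling settings are taken directly from those tests.
import torch
from diffusers import DPMSolverMultistepScheduler, TextToVideoSDPipeline

pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")
generator = torch.Generator(device="cpu").manual_seed(0)
video_frames = pipe(
    "Spiderman is surfing", generator=generator, num_inference_steps=25, output_type="pt"
).frames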
423
0
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x=-0.6, figure_center_y=-0.4, figure_width=0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding=False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
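# Quick sanity check of the escape-time helper above: the origin never diverges
# (distance 1.0, rendered black), while 1 + 0i escapes after a single step.
print(get_distance(0, 0, 50))  # 1.0
print(get_distance(1, 0, 50))  # ~0.02, mapped to a bright hue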
684
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
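# Example invocation of the conversion script above; the script filename is an
# assumption and the paths are placeholders:
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#     --gpt2_checkpoint_path /path/to/tf_checkpoint \
#     --gpt2_config_file /path/to/config.json \
#     --pytorch_dump_folder_path /path/to/output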
300
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json", # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class SCREAMING_SNAKE_CASE_ ( _a ): """simple docstring""" __lowerCAmelCase : Union[str, Any] ='''altclip_text_model''' def __init__( self :Optional[int], snake_case :Optional[Any]=25_0002, snake_case :int=1024, snake_case :int=24, snake_case :Dict=16, snake_case :str=4096, snake_case :Dict="gelu", snake_case :Dict=0.1, snake_case :Any=0.1, snake_case :Optional[Any]=514, snake_case :Dict=1, snake_case :Dict=0.0_2, snake_case :int=0.0_2, snake_case :List[str]=1e-0_5, snake_case :Optional[Any]=1, snake_case :Optional[int]=0, snake_case :List[Any]=2, snake_case :str="absolute", snake_case :Dict=True, snake_case :Optional[Any]=768, **snake_case :List[str], ): """simple docstring""" super().__init__(pad_token_id=snake_case, bos_token_id=snake_case, eos_token_id=snake_case, **snake_case) _lowercase =vocab_size _lowercase =hidden_size _lowercase =num_hidden_layers _lowercase =num_attention_heads _lowercase =hidden_act _lowercase =intermediate_size _lowercase =hidden_dropout_prob _lowercase =attention_probs_dropout_prob _lowercase =max_position_embeddings _lowercase =type_vocab_size _lowercase =initializer_range _lowercase =initializer_factor _lowercase =layer_norm_eps _lowercase =position_embedding_type _lowercase =use_cache _lowercase =project_dim class SCREAMING_SNAKE_CASE_ ( _a ): """simple docstring""" __lowerCAmelCase : List[str] ='''altclip_vision_model''' def __init__( self :List[Any], snake_case :Optional[int]=768, snake_case :Dict=3072, snake_case :Optional[int]=512, snake_case :Dict=12, snake_case :List[str]=12, snake_case :Any=3, snake_case :int=224, snake_case :List[str]=32, snake_case :Tuple="quick_gelu", snake_case :Optional[Any]=1e-5, snake_case :Union[str, Any]=0.0, snake_case :Tuple=0.0_2, snake_case :str=1.0, **snake_case :int, ): """simple docstring""" super().__init__(**snake_case) _lowercase =hidden_size _lowercase =intermediate_size _lowercase =projection_dim _lowercase =num_hidden_layers _lowercase =num_attention_heads _lowercase =num_channels _lowercase =patch_size _lowercase =image_size _lowercase =initializer_range _lowercase =initializer_factor _lowercase =attention_dropout _lowercase =layer_norm_eps _lowercase =hidden_act @classmethod def UpperCamelCase__ ( cls :Union[str, Any], snake_case :Union[str, os.PathLike], **snake_case :Optional[Any]): """simple docstring""" cls._set_token_in_kwargs(snake_case) _lowercase , _lowercase =cls.get_config_dict(snake_case, **snake_case) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get('model_type') == "altclip": _lowercase =config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''') return cls.from_dict(snake_case, **snake_case) class SCREAMING_SNAKE_CASE_ ( _a ): """simple docstring""" __lowerCAmelCase : int ='''altclip''' __lowerCAmelCase : Union[str, Any] =True def __init__( self :Union[str, Any], snake_case :Any=None, snake_case :str=None, snake_case :List[Any]=768, snake_case :Optional[Any]=2.6_5_9_2, **snake_case :Tuple): """simple docstring""" _lowercase =kwargs.pop('text_config_dict', snake_case) _lowercase =kwargs.pop('vision_config_dict', snake_case) super().__init__(**snake_case) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: _lowercase ={} # This is the complete result when using `text_config_dict`. _lowercase =AltCLIPTextConfig(**snake_case).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: _lowercase =( f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. ''' f'''The value `text_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: _lowercase =( f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The ''' f'''value `text_config["{key}"]` will be overriden.''' ) logger.warning(snake_case) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict) if vision_config_dict is not None: if vision_config is None: _lowercase ={} # This is the complete result when using `vision_config_dict`. _lowercase =AltCLIPVisionConfig(**snake_case).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: _lowercase ={ str(snake_case): value for key, value in _vision_config_dict['id2label'].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: _lowercase =( f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different ''' f'''values. The value `vision_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: _lowercase =( f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. ''' f'''The value `vision_config["{key}"]` will be overriden.''' ) logger.warning(snake_case) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict) if text_config is None: _lowercase ={} logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.') if vision_config is None: _lowercase ={} logger.info('`vision_config` is `None`. 
initializing the `AltCLIPVisionConfig` with default values.') _lowercase =AltCLIPTextConfig(**snake_case) _lowercase =AltCLIPVisionConfig(**snake_case) _lowercase =projection_dim _lowercase =logit_scale_init_value _lowercase =1.0 @classmethod def UpperCamelCase__ ( cls :int, snake_case :AltCLIPTextConfig, snake_case :AltCLIPVisionConfig, **snake_case :Dict): """simple docstring""" return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **snake_case) def UpperCamelCase__ ( self :Union[str, Any]): """simple docstring""" _lowercase =copy.deepcopy(self.__dict__) _lowercase =self.text_config.to_dict() _lowercase =self.vision_config.to_dict() _lowercase =self.__class__.model_type return output
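# Composition sketch for the three config classes above, assuming they carry
# their usual public names (AltCLIPTextConfig, AltCLIPVisionConfig,
# AltCLIPConfig, as the internal references suggest) and that the classmethod
# shown last is from_text_vision_configs. Values are illustrative, not tied to
# a released checkpoint.
text_config = AltCLIPTextConfig(hidden_size=512, num_hidden_layers=4)
vision_config = AltCLIPVisionConfig(hidden_size=512, num_hidden_layers=4)
config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
print(config.projection_dim)  # 768 by default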
557
import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin _SCREAMING_SNAKE_CASE = random.Random() if is_torch_available(): import torch def _snake_case (_snake_case : str , _snake_case : Optional[Any]=1.0 , _snake_case : int=None , _snake_case : Optional[int]=None) -> Optional[int]: if rng is None: _lowercase =global_rng _lowercase =[] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def __init__( self :int, snake_case :List[Any], snake_case :List[str]=7, snake_case :Union[str, Any]=400, snake_case :List[Any]=2000, snake_case :Union[str, Any]=1, snake_case :Tuple=0.0, snake_case :Tuple=1_6000, snake_case :Optional[Any]=True, snake_case :List[Any]=True, ): """simple docstring""" _lowercase =parent _lowercase =batch_size _lowercase =min_seq_length _lowercase =max_seq_length _lowercase =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _lowercase =feature_size _lowercase =padding_value _lowercase =sampling_rate _lowercase =return_attention_mask _lowercase =do_normalize def UpperCamelCase__ ( self :int): """simple docstring""" return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def UpperCamelCase__ ( self :Tuple, snake_case :Optional[Any]=False, snake_case :int=False): """simple docstring""" def _flatten(snake_case :Optional[int]): return list(itertools.chain(*snake_case)) if equal_length: _lowercase =floats_list((self.batch_size, self.max_seq_length)) else: # make sure that inputs increase in size _lowercase =[ _flatten(floats_list((x, self.feature_size))) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: _lowercase =[np.asarray(snake_case) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class SCREAMING_SNAKE_CASE_ ( _a , unittest.TestCase ): """simple docstring""" __lowerCAmelCase : Any =ASTFeatureExtractor def UpperCamelCase__ ( self :str): """simple docstring""" _lowercase =ASTFeatureExtractionTester(self) def UpperCamelCase__ ( self :int): """simple docstring""" _lowercase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 _lowercase =[floats_list((1, x))[0] for x in range(800, 1400, 200)] _lowercase =[np.asarray(snake_case) for speech_input in speech_inputs] # Test not batched input _lowercase =feat_extract(speech_inputs[0], return_tensors='np').input_values _lowercase =feat_extract(np_speech_inputs[0], return_tensors='np').input_values self.assertTrue(np.allclose(snake_case, snake_case, atol=1e-3)) # Test batched _lowercase =feat_extract(snake_case, padding=snake_case, return_tensors='np').input_values _lowercase =feat_extract(snake_case, padding=snake_case, return_tensors='np').input_values for enc_seq_a, enc_seq_a in zip(snake_case, snake_case): self.assertTrue(np.allclose(snake_case, snake_case, atol=1e-3)) # Test 2-D numpy arrays are batched. 
_lowercase =[floats_list((1, x))[0] for x in (800, 800, 800)] _lowercase =np.asarray(snake_case) _lowercase =feat_extract(snake_case, return_tensors='np').input_values _lowercase =feat_extract(snake_case, return_tensors='np').input_values for enc_seq_a, enc_seq_a in zip(snake_case, snake_case): self.assertTrue(np.allclose(snake_case, snake_case, atol=1e-3)) @require_torch def UpperCamelCase__ ( self :Tuple): """simple docstring""" import torch _lowercase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) _lowercase =np.random.rand(100).astype(np.floataa) _lowercase =np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: _lowercase =feature_extractor.pad([{'input_values': inputs}], return_tensors='np') self.assertTrue(np_processed.input_values.dtype == np.floataa) _lowercase =feature_extractor.pad([{'input_values': inputs}], return_tensors='pt') self.assertTrue(pt_processed.input_values.dtype == torch.floataa) def UpperCamelCase__ ( self :Tuple, snake_case :Any): """simple docstring""" from datasets import load_dataset _lowercase =load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation') # automatic decoding with librispeech _lowercase =ds.sort('id').select(range(snake_case))[:num_samples]['audio'] return [x["array"] for x in speech_samples] @require_torch def UpperCamelCase__ ( self :str): """simple docstring""" _lowercase =torch.tensor( [-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6, -1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3, -1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6, -0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9]) # fmt: on _lowercase =self._load_datasamples(1) _lowercase =ASTFeatureExtractor() _lowercase =feature_extractor(snake_case, return_tensors='pt').input_values self.assertEquals(input_values.shape, (1, 1024, 128)) self.assertTrue(torch.allclose(input_values[0, 0, :30], snake_case, atol=1e-4))
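# Usage sketch mirroring the tests above: AST expects 16 kHz mono audio and
# pads/crops the log-mel spectrogram to 1024 frames of 128 bins.
import numpy as np
from transformers import ASTFeatureExtractor

feature_extractor = ASTFeatureExtractor()
waveform = np.random.rand(16000).astype(np.float32)  # 1 second of dummy audio
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="np")
print(inputs.input_values.shape)  # (1, 1024, 128)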
557
1
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def a(): '''simple docstring''' snake_case_ = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg' snake_case_ = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('RGB' ) return image def a(lowercase__ ): '''simple docstring''' snake_case_ = [] # fmt: off # vision encoder rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') ) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') ) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') ) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') ) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') ) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) ) rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") ) # QFormer rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') ) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') ) # fmt: on return rename_keys def a(lowercase__ , lowercase__ , lowercase__ ): '''simple docstring''' snake_case_ 
= dct.pop(lowercase__ ) snake_case_ = val def a(lowercase__ , lowercase__ ): '''simple docstring''' for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases snake_case_ = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" ) snake_case_ = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" ) # next, set bias in the state dict snake_case_ = torch.cat((q_bias, torch.zeros_like(lowercase__ , requires_grad=lowercase__ ), v_bias) ) snake_case_ = qkv_bias def a(lowercase__ ): '''simple docstring''' snake_case_ = 364 if 'coco' in model_name else 224 snake_case_ = InstructBlipVisionConfig(image_size=lowercase__ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: snake_case_ = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: snake_case_ = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: snake_case_ = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf' , vocab_size=32001 ).to_dict() elif "vicuna-13b" in model_name: snake_case_ = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf' , vocab_size=32001 ).to_dict() else: raise ValueError('Model name not supported' ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 snake_case_ = InstructBlipQFormerConfig(vocab_size=30523 ).to_dict() snake_case_ = InstructBlipConfig(vision_config=lowercase__ , text_config=lowercase__ , qformer_config=lowercase__ ) return config, image_size @torch.no_grad() def a(lowercase__ , lowercase__=None , lowercase__=False ): '''simple docstring''' snake_case_ = AutoTokenizer.from_pretrained('bert-base-uncased' , truncation_side='left' ) qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'} ) if "t5" in model_name: snake_case_ = TaTokenizerFast.from_pretrained('google/flan-t5-xl' , truncation_side='left' ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) snake_case_ = LlamaTokenizerFast.from_pretrained( 'huggyllama/llama-7b' , truncation_side='left' , bos_token='</s>' , unk_token='</s>' ) tokenizer.add_special_tokens({'pad_token': '[PAD]'} ) snake_case_ , snake_case_ = get_blipa_config(lowercase__ ) snake_case_ = InstructBlipForConditionalGeneration(lowercase__ ).eval() snake_case_ = { 'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'), 'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'), 'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'), 'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'), } snake_case_ , snake_case_ = model_name_to_original[model_name] # load original model print('Loading original model...' 
) snake_case_ = 'cuda:1' if torch.cuda.is_available() else 'cpu' snake_case_ = 'cuda:2' if torch.cuda.is_available() else 'cpu' snake_case_ , snake_case_ , snake_case_ = load_model_and_preprocess( name=lowercase__ , model_type=lowercase__ , is_eval=lowercase__ , device=lowercase__ ) original_model.eval() print('Done!' ) # update state dict keys snake_case_ = original_model.state_dict() snake_case_ = create_rename_keys(lowercase__ ) for src, dest in rename_keys: rename_key(lowercase__ , lowercase__ , lowercase__ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): snake_case_ = state_dict.pop(lowercase__ ) if key.startswith('Qformer.bert' ): snake_case_ = key.replace('Qformer.bert' , 'qformer' ) if "attention.self" in key: snake_case_ = key.replace('self' , 'attention' ) if "llm_proj" in key: snake_case_ = key.replace('llm_proj' , 'language_projection' ) if "t5_proj" in key: snake_case_ = key.replace('t5_proj' , 'language_projection' ) if key.startswith('llm_model' ): snake_case_ = key.replace('llm_model' , 'language_model' ) if key.startswith('t5' ): snake_case_ = key.replace('t5' , 'language' ) snake_case_ = val # read in qv biases read_in_q_v_bias(lowercase__ , lowercase__ ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(lowercase__ , strict=lowercase__ ) snake_case_ = load_demo_image() snake_case_ = 'What is unusual about this image?' # create processor snake_case_ = BlipImageProcessor( size={'height': image_size, 'width': image_size} , image_mean=lowercase__ , image_std=lowercase__ ) snake_case_ = InstructBlipProcessor( image_processor=lowercase__ , tokenizer=lowercase__ , qformer_tokenizer=lowercase__ , ) snake_case_ = processor(images=lowercase__ , text=lowercase__ , return_tensors='pt' ).to(lowercase__ ) # make sure processor creates exact same pixel values snake_case_ = vis_processors['eval'](lowercase__ ).unsqueeze(0 ).to(lowercase__ ) snake_case_ = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , lowercase__ ) original_model.to(lowercase__ ) hf_model.to(lowercase__ ) with torch.no_grad(): if "vicuna" in model_name: snake_case_ = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits snake_case_ = hf_model(**lowercase__ ).logits else: snake_case_ = original_model( {'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits snake_case_ = tokenizer('\n' , return_tensors='pt' ).input_ids.to(lowercase__ ) snake_case_ = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) snake_case_ = hf_model(**lowercase__ , labels=lowercase__ ).logits print('First values of original logits:' , original_logits[0, :3, :3] ) print('First values of HF logits:' , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape snake_case_ = 1e-4 if 'vicuna' in model_name else 1e-5 assert torch.allclose(original_logits.to(logits.device ) , lowercase__ , atol=lowercase__ ) print('Looks ok!' ) print('Generating with original model...' ) snake_case_ = original_model.generate({'image': original_pixel_values, 'prompt': prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print('Generating with HF model...' 
) snake_case_ = hf_model.generate( **lowercase__ , do_sample=lowercase__ , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? snake_case_ = 2 print('Original generation:' , lowercase__ ) snake_case_ = processor.batch_decode(lowercase__ , skip_special_tokens=lowercase__ ) snake_case_ = [text.strip() for text in output_text] print('HF generation:' , lowercase__ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(lowercase__ ) hf_model.save_pretrained(lowercase__ ) if push_to_hub: processor.push_to_hub(f"""Salesforce/{model_name}""" ) hf_model.push_to_hub(f"""Salesforce/{model_name}""" ) if __name__ == "__main__": A = argparse.ArgumentParser() A = [ 'instructblip-vicuna-7b', 'instructblip-vicuna-13b', 'instructblip-flan-t5-xl', 'instructblip-flan-t5-xxl', ] parser.add_argument( '--model_name', default='instructblip-flan-t5-xl', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) A = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
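# Example invocation of the conversion script above; the script filename is an
# assumption, the model choices come from the parser, and the paths are
# placeholders:
#   python convert_instructblip_original_to_pytorch.py \
#     --model_name instructblip-flan-t5-xl \
#     --pytorch_dump_folder_path /path/to/output \
#     --push_to_hub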
187
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
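# Effect of the lazy structure above: importing the package is cheap, and the
# heavy submodules only load on first attribute access (module path assumed to
# be the usual transformers location).
import transformers.models.speech_to_text as speech_to_text  # fast, nothing heavy yet

config = speech_to_text.Speech2TextConfig()  # triggers the real configuration import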
187
1
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # collect each undirected edge exactly once
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
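# Worked example: Kruskal's algorithm keeps the two cheapest edges of this
# triangle and drops the weight-10 edge that would close a cycle.
g = GraphUndirectedWeighted[int]()
g.add_edge(1, 2, 1)
g.add_edge(2, 3, 2)
g.add_edge(1, 3, 10)
mst = g.kruskal()
print(mst.connections)  # {1: {2: 1}, 2: {1: 1, 3: 2}, 3: {2: 2}}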
711
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type

from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter


logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
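# Sketch of resolving a formatter through the registry above: aliases map to
# the canonical type name before lookup.
formatter = get_formatter("np")  # alias "np" resolves to the "numpy" type
print(type(formatter).__name__)  # NumpyFormatter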
39
0
def gray_code(bit_count: int) -> list:
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # base cases for 0 and 1 bits
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
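# Worked example: the 2-bit reflected Gray code, where consecutive values
# differ in exactly one bit.
print(gray_code_sequence_string(2))  # ['00', '01', '11', '10']
print(gray_code(2))                  # [0, 1, 3, 2]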
197
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def __lowercase ( snake_case, snake_case ): """simple docstring""" __magic_name__ :str = XCLIPTextConfig() # derive patch size from model name __magic_name__ :Union[str, Any] = model_name.find('''patch''' ) __magic_name__ :Optional[Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __magic_name__ :int = XCLIPVisionConfig(patch_size=snake_case, num_frames=snake_case ) if "large" in model_name: __magic_name__ :Dict = 7_6_8 __magic_name__ :int = 3_0_7_2 __magic_name__ :List[Any] = 1_2 __magic_name__ :str = 1_0_2_4 __magic_name__ :Any = 4_0_9_6 __magic_name__ :Optional[Any] = 1_6 __magic_name__ :Union[str, Any] = 2_4 __magic_name__ :Union[str, Any] = 7_6_8 __magic_name__ :Tuple = 3_0_7_2 if model_name == "xclip-large-patch14-16-frames": __magic_name__ :List[str] = 3_3_6 __magic_name__ :Any = XCLIPConfig.from_text_vision_configs(snake_case, snake_case ) if "large" in model_name: __magic_name__ :str = 7_6_8 return config def __lowercase ( snake_case ): """simple docstring""" if name == "token_embedding.weight": __magic_name__ :Any = name.replace('''token_embedding.weight''', '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __magic_name__ :Any = name.replace('''positional_embedding''', '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __magic_name__ :List[str] = name.replace('''ln_1''', '''layer_norm1''' ) if "ln_2" in name: __magic_name__ :str = name.replace('''ln_2''', '''layer_norm2''' ) if "c_fc" in name: __magic_name__ :List[Any] = name.replace('''c_fc''', '''fc1''' ) if "c_proj" in name: __magic_name__ :Any = name.replace('''c_proj''', '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __magic_name__ :Any = name.replace('''transformer.resblocks''', '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __magic_name__ :Union[str, Any] = name.replace('''attn.out_proj''', '''self_attn.out_proj''' ) if "ln_final" in name: __magic_name__ :Tuple = name.replace('''ln_final''', '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __magic_name__ :List[Any] = name.replace('''visual.class_embedding''', '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __magic_name__ :Any = name.replace('''visual.positional_embedding''', '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __magic_name__ :Union[str, Any] = name.replace('''visual.transformer.resblocks''', '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __magic_name__ :Tuple = name.replace('''visual.conv1''', '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __magic_name__ :Tuple = name.replace('''visual.ln_pre''', '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __magic_name__ :Optional[Any] = name.replace('''visual.ln_post''', '''vision_model.post_layernorm''' ) if "visual.proj" in name: __magic_name__ :Tuple = name.replace('''visual.proj''', '''visual_projection.weight''' ) if "text_projection" in name: __magic_name__ :int = name.replace('''text_projection''', '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: 
__magic_name__ :int = name.replace('''prompts_visual_proj''', '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: __magic_name__ :Dict = name.replace('''prompts_visual_ln''', '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __magic_name__ :List[Any] = name.replace('''positional''', '''position''' ) if name.startswith('''mit.resblocks''' ): __magic_name__ :Union[str, Any] = name.replace('''mit.resblocks''', '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __magic_name__ :str = name.replace('''prompts_generator.norm''', '''prompts_generator.layernorm''' ) return name def __lowercase ( snake_case, snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __magic_name__ :Any = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __magic_name__ :str = key.split('''.''' ) if key.startswith('''visual''' ): __magic_name__ :List[Any] = key_split[3] __magic_name__ :List[Any] = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __magic_name__ :List[Any] = val[ :dim, : ] __magic_name__ :List[str] = val[ dim : dim * 2, : ] __magic_name__ :List[str] = val[ -dim:, : ] else: __magic_name__ :str = val[ :dim ] __magic_name__ :Optional[int] = val[ dim : dim * 2 ] __magic_name__ :Any = val[ -dim: ] else: if "weight" in key: __magic_name__ :int = val[ :dim, : ] __magic_name__ :Union[str, Any] = val[ dim : dim * 2, : ] __magic_name__ :List[Any] = val[ -dim:, : ] else: __magic_name__ :Union[str, Any] = val[:dim] __magic_name__ :str = val[ dim : dim * 2 ] __magic_name__ :Dict = val[-dim:] elif key.startswith('''mit''' ): __magic_name__ :List[Any] = key_split[2] __magic_name__ :Any = config.vision_config.mit_hidden_size if "weight" in key: __magic_name__ :Union[str, Any] = val[:dim, :] __magic_name__ :Optional[int] = val[dim : dim * 2, :] __magic_name__ :int = val[-dim:, :] else: __magic_name__ :Tuple = val[:dim] __magic_name__ :Optional[int] = val[dim : dim * 2] __magic_name__ :Optional[int] = val[-dim:] else: __magic_name__ :Any = key_split[2] __magic_name__ :List[Any] = config.text_config.hidden_size if "weight" in key: __magic_name__ :Union[str, Any] = val[:dim, :] __magic_name__ :Tuple = val[ dim : dim * 2, : ] __magic_name__ :str = val[-dim:, :] else: __magic_name__ :int = val[:dim] __magic_name__ :Any = val[ dim : dim * 2 ] __magic_name__ :str = val[-dim:] else: __magic_name__ :Tuple = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __magic_name__ :List[Any] = val.T __magic_name__ :Optional[Any] = val return orig_state_dict def __lowercase ( snake_case ): """simple docstring""" if num_frames == 8: __magic_name__ :Any = '''eating_spaghetti_8_frames.npy''' elif num_frames == 1_6: __magic_name__ :List[Any] = '''eating_spaghetti.npy''' elif num_frames == 3_2: __magic_name__ :Tuple = '''eating_spaghetti_32_frames.npy''' __magic_name__ :str = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''', filename=snake_case, repo_type='''dataset''', ) __magic_name__ :List[Any] = np.load(snake_case ) return list(snake_case ) def __lowercase ( snake_case, snake_case=None, snake_case=False ): """simple docstring""" __magic_name__ :Union[str, Any] = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( 
        '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
    ),
    '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
    '''xclip-base-patch16-16-frames''': (
        '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
    ),
    '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
    '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
    # fully supervised kinetics-600 checkpoints
    '''xclip-base-patch16-kinetics-600''': (
        '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
    ),
    '''xclip-base-patch16-kinetics-600-16-frames''': (
        '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
    ),
    '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
    # few shot
    '''xclip-base-patch16-hmdb-2-shot''': (
        '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
    ),
    '''xclip-base-patch16-hmdb-4-shot''': (
        '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
    ),
    '''xclip-base-patch16-hmdb-8-shot''': (
        '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
    ),
    '''xclip-base-patch16-hmdb-16-shot''': (
        '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
    ),
    '''xclip-base-patch16-ucf-2-shot''': (
        '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
    ),
    '''xclip-base-patch16-ucf-4-shot''': (
        '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
    ),
    '''xclip-base-patch16-ucf-8-shot''': (
        '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
    ),
    '''xclip-base-patch16-ucf-16-shot''': (
        '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
    ),
    # zero shot
    '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
    }

    __magic_name__ :Optional[int] = model_to_url[model_name]
    __magic_name__ :List[str] = 8
    if "16-frames" in model_name:
        __magic_name__ :List[Any] = 1_6
    elif "shot" in model_name:
        __magic_name__ :Dict = 3_2

    __magic_name__ :str = get_xclip_config(snake_case, snake_case )
    __magic_name__ :List[Any] = XCLIPModel(snake_case )
    model.eval()

    if "drive" in checkpoint_url:
        __magic_name__ :Any = '''pytorch_model.bin'''
        gdown.cached_download(snake_case, snake_case, quiet=snake_case )
        __magic_name__ :Optional[Any] = torch.load(snake_case, map_location='''cpu''' )['''model''']
    else:
        __magic_name__ :Optional[int] = torch.hub.load_state_dict_from_url(snake_case )['''model''']

    __magic_name__ :List[str] = convert_state_dict(snake_case, snake_case )
    __magic_name__ :List[Any] = XCLIPModel(snake_case )
    __magic_name__ , __magic_name__ :Optional[Any] = model.load_state_dict(snake_case, strict=snake_case )
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    __magic_name__ :str = 3_3_6 if model_name == '''xclip-large-patch14-16-frames''' else 2_2_4
    __magic_name__ :Optional[int] = VideoMAEImageProcessor(size=snake_case )
    __magic_name__ :Optional[int] = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
    __magic_name__ :Tuple = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
    __magic_name__ :Optional[int] = XCLIPProcessor(image_processor=snake_case, tokenizer=snake_case )

    __magic_name__ :List[Any] = prepare_video(snake_case )
    __magic_name__ :str = processor(
        text=['''playing sports''', '''eating spaghetti''', '''go shopping'''], videos=snake_case, return_tensors='''pt''', padding=snake_case
    )

    print('''Shape of pixel values:''', inputs.pixel_values.shape )

    with torch.no_grad():
        __magic_name__ :Tuple = model(**snake_case )

    # Verify outputs
    __magic_name__ :Any = outputs.logits_per_video
    __magic_name__ :str = logits_per_video.softmax(dim=1 )
    print('''Probs:''', snake_case )

    # kinetics-400
    if model_name == "xclip-base-patch32":
        __magic_name__ :Dict = torch.tensor([[0.0019, 0.9951, 0.0030]] )
    elif model_name == "xclip-base-patch32-16-frames":
        __magic_name__ :str = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] )
    elif model_name == "xclip-base-patch16":
        __magic_name__ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
    elif model_name == "xclip-base-patch16-16-frames":
        __magic_name__ :Tuple = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] )
    elif model_name == "xclip-large-patch14":
        __magic_name__ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] )
    elif model_name == "xclip-large-patch14-16-frames":
        __magic_name__ :Optional[int] = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] )
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        __magic_name__ :Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        __magic_name__ :List[str] = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] )
    elif model_name == "xclip-large-patch14-kinetics-600":
        __magic_name__ :List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        __magic_name__ :Tuple = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] )
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        __magic_name__ :List[str] = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] )
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        __magic_name__ :Optional[int] = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] )
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        __magic_name__ :Optional[int] = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] )
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        __magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        __magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        __magic_name__ :Optional[int] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        __magic_name__ :Any = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] )
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        __magic_name__ :Optional[int] = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] )
    else:
        raise ValueError(f'''Model name {model_name} not supported''' )
    assert torch.allclose(snake_case, snake_case, atol=1E-3 )
    print('''Looks ok!''' )

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(snake_case )

    if push_to_hub:
        print('''Pushing model, processor and slow tokenizer files to the hub...''' )
        model.push_to_hub(snake_case, organization='''nielsr''' )
        processor.push_to_hub(snake_case, organization='''nielsr''' )
        slow_tokenizer.push_to_hub(snake_case, organization='''nielsr''' )


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""xclip-base-patch32""",
        type=str,
        help="""Name of the model.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
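# --- Added usage sketch (not part of the original script). The shell command
# and the local dump path are hypothetical, assuming the file is saved as
# convert_x_clip.py:
#
#   python convert_x_clip.py --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32
#
# A folder produced by save_pretrained above reloads with the standard API,
# shown here with the already-published hub checkpoint:
from transformers import XCLIPModel, XCLIPProcessor

model = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32")
processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")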
0
0
'''simple docstring''' from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging lowerCAmelCase: str = logging.get_logger(__name__) class a__( lowerCamelCase__ ): lowercase__ = ["""pixel_values"""] def __init__( self : Optional[Any] , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = PILImageResampling.BICUBIC , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : bool = True , __snake_case : Union[int, float] = 1 / 2_55 , __snake_case : bool = True , __snake_case : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , __snake_case : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **__snake_case : Tuple , ): super().__init__(**__snake_case ) a : List[Any] = size if size is not None else {'shortest_edge': 2_24} a : List[Any] = get_size_dict(__snake_case , default_to_square=__snake_case ) a : List[str] = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24} a : Optional[Any] = get_size_dict(__snake_case , param_name='crop_size' ) a : Any = do_resize a : Optional[int] = size a : Any = resample a : Tuple = do_center_crop a : List[str] = crop_size a : List[Any] = do_rescale a : List[str] = rescale_factor a : Union[str, Any] = do_normalize a : List[Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN a : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD def lowercase_ ( self : Dict , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : PILImageResampling = PILImageResampling.BICUBIC , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : List[str] , ): a : Optional[Any] = get_size_dict(__snake_case , default_to_square=__snake_case ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: a : Tuple = int((2_56 / 2_24) * size['shortest_edge'] ) a : Optional[Any] = get_resize_output_image_size(__snake_case , size=__snake_case , default_to_square=__snake_case ) a : Tuple = {'height': output_size[0], 'width': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( F"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" ) return resize( __snake_case , size=(size_dict['height'], size_dict['width']) , resample=__snake_case , data_format=__snake_case , **__snake_case ) def lowercase_ ( self : List[str] , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Tuple , ): a : Tuple = get_size_dict(__snake_case ) if "height" not in size or "width" not in size: raise ValueError(F"""Size dict must have keys 'height' and 'width'. 
Got {size.keys()}""" ) return center_crop(__snake_case , size=(size['height'], size['width']) , data_format=__snake_case , **__snake_case ) def lowercase_ ( self : int , __snake_case : np.ndarray , __snake_case : Union[int, float] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : int , ): return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case ) def lowercase_ ( self : Any , __snake_case : np.ndarray , __snake_case : Union[float, List[float]] , __snake_case : Union[float, List[float]] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : List[Any] , ): return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case ) def lowercase_ ( self : Tuple , __snake_case : ImageInput , __snake_case : Optional[bool] = None , __snake_case : Optional[Dict[str, int]] = None , __snake_case : PILImageResampling = None , __snake_case : Optional[bool] = None , __snake_case : Optional[Dict[str, int]] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[float] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[Union[float, Iterable[float]]] = None , __snake_case : Optional[Union[float, Iterable[float]]] = None , __snake_case : Optional[TensorType] = None , __snake_case : ChannelDimension = ChannelDimension.FIRST , **__snake_case : int , ): a : Optional[int] = do_resize if do_resize is not None else self.do_resize a : str = resample if resample is not None else self.resample a : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop a : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale a : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor a : str = do_normalize if do_normalize is not None else self.do_normalize a : List[Any] = image_mean if image_mean is not None else self.image_mean a : Any = image_std if image_std is not None else self.image_std a : Optional[int] = size if size is not None else self.size a : List[Any] = get_size_dict(__snake_case , default_to_square=__snake_case ) a : List[str] = crop_size if crop_size is not None else self.crop_size a : Tuple = get_size_dict(__snake_case , param_name='crop_size' ) a : str = make_list_of_images(__snake_case ) if not valid_images(__snake_case ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
a : List[str] = [to_numpy_array(__snake_case ) for image in images] if do_resize: a : Union[str, Any] = [self.resize(__snake_case , __snake_case , __snake_case ) for image in images] if do_center_crop: a : List[str] = [self.center_crop(__snake_case , __snake_case ) for image in images] if do_rescale: a : Tuple = [self.rescale(__snake_case , __snake_case ) for image in images] if do_normalize: a : Optional[Any] = [self.normalize(__snake_case , __snake_case , __snake_case ) for image in images] a : List[str] = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images] a : Any = {'pixel_values': images} return BatchFeature(data=__snake_case , tensor_type=__snake_case )
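# --- Added sketch (the helper name is mine, not part of the class above): the
# resize step first scales the shortest edge to int(256 / 224 * shortest_edge)
# while preserving aspect ratio, and only then takes the center crop of
# crop_size. The geometry in isolation:
def shortest_edge_resize_size(height: int, width: int, shortest_edge: int = 224) -> tuple:
    target = int((256 / 224) * shortest_edge)  # 256 for the default 224 setting
    short, long = (height, width) if height < width else (width, height)
    new_short, new_long = target, int(long * target / short)
    return (new_short, new_long) if height < width else (new_long, new_short)


assert shortest_edge_resize_size(480, 640) == (256, 341)  # 480 -> 256, 640 scaled in proportion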
195
'''simple docstring'''
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    # copy the first n (rstripped) lines of every file in src_dir into dest_dir
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
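# --- Added usage note: with python-fire, the function above is already a CLI.
# Directory names below are made up:
#
#   python minify.py ./raw_texts ./mini_texts 100
#
# which is equivalent to calling minify("./raw_texts", "./mini_texts", 100).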
195
1
# Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version __a = get_logger(__name__) class __lowercase : UpperCamelCase = '''dummy_data''' UpperCamelCase = '''datasets''' UpperCamelCase = False def __init__( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Union[Version, str] , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[List[Callable]] = None , ) -> Tuple: """simple docstring""" UpperCAmelCase = 0 UpperCAmelCase = dataset_name UpperCAmelCase = cache_dir UpperCAmelCase = use_local_dummy_data UpperCAmelCase = config # download_callbacks take a single url as input UpperCAmelCase = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root UpperCAmelCase = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general UpperCAmelCase = str(__lowerCamelCase ) # to be downloaded UpperCAmelCase = None UpperCAmelCase = None @property def _lowercase ( self : Any ) -> str: """simple docstring""" if self._dummy_file is None: UpperCAmelCase = self.download_dummy_data() return self._dummy_file @property def _lowercase ( self : Tuple ) -> Tuple: """simple docstring""" if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("""dummy""" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("""dummy""" , self.version_name ) @property def _lowercase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return os.path.join(self.dummy_data_folder , """dummy_data.zip""" ) def _lowercase ( self : int ) -> Optional[int]: """simple docstring""" UpperCAmelCase = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) UpperCAmelCase = cached_path( __lowerCamelCase , cache_dir=self.cache_dir , extract_compressed_file=__lowerCamelCase , force_extract=__lowerCamelCase ) return os.path.join(__lowerCamelCase , self.dummy_file_name ) @property def _lowercase ( self : Dict ) -> Dict: """simple docstring""" return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" if self._bucket_url is None: UpperCAmelCase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) ) return self._bucket_url @property def _lowercase ( self : Any ) -> int: """simple docstring""" if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] ) def _lowercase ( self : Union[str, Any] , __lowerCamelCase : List[str] , *__lowerCamelCase : Dict ) -> List[str]: """simple docstring""" if self.load_existing_dummy_data: # dummy data is downloaded and tested UpperCAmelCase = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned UpperCAmelCase = self.dummy_file_name # special case when data_url is a dict if isinstance(__lowerCamelCase , __lowerCamelCase ): return self.create_dummy_data_dict(__lowerCamelCase , __lowerCamelCase ) elif isinstance(__lowerCamelCase , (list, tuple) ): return self.create_dummy_data_list(__lowerCamelCase , __lowerCamelCase ) else: return self.create_dummy_data_single(__lowerCamelCase , __lowerCamelCase ) def _lowercase ( self : Dict , __lowerCamelCase : List[str] , *__lowerCamelCase : str ) -> str: """simple docstring""" return self.download_and_extract(__lowerCamelCase ) def _lowercase ( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple ) -> Any: """simple docstring""" return self.download_and_extract(__lowerCamelCase ) def _lowercase ( self : int , __lowerCamelCase : Tuple , *__lowerCamelCase : Any , **__lowerCamelCase : List[str] ) -> int: """simple docstring""" return path def _lowercase ( self : Tuple ) -> Union[str, Any]: """simple docstring""" return {} def _lowercase ( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ) -> int: """simple docstring""" UpperCAmelCase = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__lowerCamelCase , __lowerCamelCase ): for single_url in single_urls: download_callback(__lowerCamelCase ) else: UpperCAmelCase = single_urls download_callback(__lowerCamelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__lowerCamelCase , __lowerCamelCase ): UpperCAmelCase = [os.path.join(__lowerCamelCase , urllib.parse.quote_plus(Path(__lowerCamelCase ).name ) ) for x in single_urls] else: UpperCAmelCase = single_urls UpperCAmelCase = os.path.join(__lowerCamelCase , urllib.parse.quote_plus(Path(__lowerCamelCase ).name ) ) UpperCAmelCase = value # make sure that values are unique if all(isinstance(__lowerCamelCase , __lowerCamelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique UpperCAmelCase = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def _lowercase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : int ) -> int: """simple docstring""" UpperCAmelCase = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one UpperCAmelCase = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , __lowerCamelCase ) ) for url in data_url ) UpperCAmelCase = all( url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): UpperCAmelCase = [data_url[0]] * len(__lowerCamelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__lowerCamelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus 
UpperCAmelCase = os.path.join(__lowerCamelCase , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) ) dummy_data_list.append(__lowerCamelCase ) return dummy_data_list def _lowercase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple ) -> Optional[Any]: """simple docstring""" for download_callback in self.download_callbacks: download_callback(__lowerCamelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus UpperCAmelCase = os.path.join(__lowerCamelCase , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) ) if os.path.exists(__lowerCamelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def _lowercase ( self : Tuple ) -> Dict: """simple docstring""" pass def _lowercase ( self : Any ) -> Union[str, Any]: """simple docstring""" pass def _lowercase ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] ) -> Dict: """simple docstring""" def _iter_archive_members(__lowerCamelCase : int ): # this preserves the order of the members inside the ZIP archive UpperCAmelCase = Path(self.dummy_file ).parent UpperCAmelCase = path.relative_to(__lowerCamelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: UpperCAmelCase = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__lowerCamelCase ) UpperCAmelCase = Path(__lowerCamelCase ) UpperCAmelCase = _iter_archive_members(__lowerCamelCase ) if self.use_local_dummy_data else path.rglob("""*""" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ): yield file_path.relative_to(__lowerCamelCase ).as_posix(), file_path.open("""rb""" ) def _lowercase ( self : str , __lowerCamelCase : List[Any] ) -> str: """simple docstring""" if not isinstance(__lowerCamelCase , __lowerCamelCase ): UpperCAmelCase = [paths] for path in paths: if os.path.isfile(__lowerCamelCase ): if os.path.basename(__lowerCamelCase ).startswith((""".""", """__""") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__lowerCamelCase ): if os.path.basename(__lowerCamelCase ).startswith((""".""", """__""") ): continue dirnames.sort() for filename in sorted(__lowerCamelCase ): if filename.startswith((""".""", """__""") ): continue yield os.path.join(__lowerCamelCase , __lowerCamelCase )
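# --- Added illustration (names and URL are made up): the dummy-data manager
# above keys each downloaded file by the URL-quoted last path segment, via
# urllib.parse.quote_plus, so query strings stay filesystem-safe.
import os
import urllib.parse
from pathlib import Path

_url = "https://example.com/data/train.txt?shard=1"
_local = os.path.join("dummy_data", urllib.parse.quote_plus(Path(_url).name))
print(_local)  # dummy_data/train.txt%3Fshard%3D1 (on POSIX)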
377
def fibonacci(n: int) -> int:
    # iterative Fibonacci with F(0) = 0, F(1) = 1; e.g. fibonacci(12) == 144
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    # index of the first Fibonacci number with at least n digits
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
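# Added sanity check (not part of the original script): F(12) = 144 is the
# first Fibonacci number with three digits under this indexing, so the index
# search should return 12.
assert fibonacci(12) == 144
assert fibonacci_digits_index(3) == 12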
377
1
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class _A ( UpperCamelCase , unittest.TestCase ): '''simple docstring''' _lowercase = DebertaVaTokenizer _lowercase = DebertaVaTokenizerFast _lowercase = True _lowercase = True def __lowerCAmelCase ( self : Optional[Any] )-> List[Any]: super().setUp() # We have a SentencePiece fixture for testing snake_case__ : List[Any] = DebertaVaTokenizer(UpperCamelCase_ , unk_token="""<unk>""" ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self : List[str] , lowerCamelCase : str )-> Any: snake_case__ : int = "this is a test" snake_case__ : Tuple = "this is a test" return input_text, output_text def __lowerCAmelCase ( self : Dict )-> List[str]: snake_case__ : Tuple = "<pad>" snake_case__ : int = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ ) def __lowerCAmelCase ( self : int )-> Dict: snake_case__ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """[PAD]""" ) self.assertEqual(len(UpperCamelCase_ ) , 30_001 ) def __lowerCAmelCase ( self : Optional[int] )-> str: self.assertEqual(self.get_tokenizer().vocab_size , 30_000 ) def __lowerCAmelCase ( self : int )-> List[str]: # fmt: off snake_case__ : List[str] = " \tHeLLo!how \n Are yoU? " snake_case__ : Tuple = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on snake_case__ : List[Any] = DebertaVaTokenizer(UpperCamelCase_ , do_lower_case=UpperCamelCase_ ) snake_case__ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) snake_case__ : List[str] = DebertaVaTokenizerFast(UpperCamelCase_ , do_lower_case=UpperCamelCase_ ) snake_case__ : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) @unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" ) def __lowerCAmelCase ( self : int )-> Union[str, Any]: pass @unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" ) def __lowerCAmelCase ( self : Optional[Any] )-> int: pass def __lowerCAmelCase ( self : List[str] )-> List[str]: # fmt: off snake_case__ : List[Any] = "I was born in 92000, and this is falsé." 
snake_case__ : Optional[int] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on snake_case__ : Optional[Any] = DebertaVaTokenizer(UpperCamelCase_ , split_by_punct=UpperCamelCase_ ) snake_case__ : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) snake_case__ : Dict = DebertaVaTokenizerFast(UpperCamelCase_ , split_by_punct=UpperCamelCase_ ) snake_case__ : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def __lowerCAmelCase ( self : Union[str, Any] )-> Optional[Any]: # fmt: off snake_case__ : List[str] = "I was born in 92000, and this is falsé." snake_case__ : Optional[int] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on snake_case__ : str = DebertaVaTokenizer(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ ) snake_case__ : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) snake_case__ : Union[str, Any] = DebertaVaTokenizerFast(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ ) snake_case__ : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def __lowerCAmelCase ( self : Tuple )-> Any: # fmt: off snake_case__ : Optional[int] = "I was born in 92000, and this is falsé." snake_case__ : str = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on snake_case__ : List[str] = DebertaVaTokenizer(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ ) snake_case__ : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) snake_case__ : List[str] = DebertaVaTokenizerFast(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ ) snake_case__ : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def __lowerCAmelCase ( self : Optional[Any] )-> int: # fmt: off snake_case__ : Union[str, Any] = "I was born in 92000, and this is falsé." 
snake_case__ : Any = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on snake_case__ : List[Any] = DebertaVaTokenizer(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ ) snake_case__ : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) snake_case__ : int = DebertaVaTokenizerFast(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ ) snake_case__ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def __lowerCAmelCase ( self : Tuple )-> Optional[Any]: # fmt: off snake_case__ : Optional[int] = " \tHeLLo!how \n Are yoU? " snake_case__ : int = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on snake_case__ : int = DebertaVaTokenizer(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ ) snake_case__ : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) snake_case__ : Tuple = DebertaVaTokenizerFast(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ ) snake_case__ : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def __lowerCAmelCase ( self : Any )-> str: snake_case__ : List[Any] = self.get_tokenizer() snake_case__ : Union[str, Any] = self.get_rust_tokenizer() snake_case__ : Union[str, Any] = "I was born in 92000, and this is falsé." 
snake_case__ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) snake_case__ : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) snake_case__ : Optional[int] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) snake_case__ : Optional[int] = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) snake_case__ : Optional[int] = self.get_rust_tokenizer() snake_case__ : int = tokenizer.encode(UpperCamelCase_ ) snake_case__ : Any = rust_tokenizer.encode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def __lowerCAmelCase ( self : int )-> int: snake_case__ : str = "This is a test" snake_case__ : List[Any] = [13, 1, 4_398, 25, 21, 1_289] snake_case__ : Optional[int] = ["▁", "T", "his", "▁is", "▁a", "▁test"] snake_case__ : Dict = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] snake_case__ : int = DebertaVaTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) snake_case__ : List[Any] = DebertaVaTokenizerFast(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) snake_case__ : Tuple = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) snake_case__ : Tuple = tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) snake_case__ : Dict = tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) snake_case__ : str = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) snake_case__ : Optional[int] = rust_tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) snake_case__ : str = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) # fmt: off snake_case__ : Dict = "I was born in 92000, and this is falsé." 
snake_case__ : Dict = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] snake_case__ : Dict = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] snake_case__ : Any = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on snake_case__ : List[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) snake_case__ : Dict = tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) snake_case__ : int = tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) snake_case__ : Dict = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) snake_case__ : Union[str, Any] = rust_tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) snake_case__ : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def __lowerCAmelCase ( self : Any )-> Optional[int]: snake_case__ : List[str] = DebertaVaTokenizer(UpperCamelCase_ ) snake_case__ : Optional[Any] = tokenizer.encode("""sequence builders""" ) snake_case__ : int = tokenizer.encode("""multi-sequence build""" ) snake_case__ : Tuple = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ ) snake_case__ : int = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCamelCase_ ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCamelCase_ , ) @slow def __lowerCAmelCase ( self : List[str] )-> int: # fmt: off snake_case__ : int = {"input_ids": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase_ , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
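# --- Added sketch (outside the unittest harness) of the slow/fast parity
# pattern these tests exercise. The hub checkpoint id is illustrative; the
# tests above assert this equality for the local spiece fixture, not as a
# guarantee for every checkpoint and input.
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast

slow = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
fast = DebertaV2TokenizerFast.from_pretrained("microsoft/deberta-v2-xlarge")
text = "I was born in 92000, and this is falsé."
assert slow.encode(text) == fast.encode(text)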
710
'''simple docstring''' from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class _A ( UpperCamelCase ): '''simple docstring''' def __init__( self : Optional[int] , lowerCamelCase : NestedDataStructureLike[PathLike] , lowerCamelCase : Optional[NamedSplit] = None , lowerCamelCase : Optional[Features] = None , lowerCamelCase : str = None , lowerCamelCase : bool = False , lowerCamelCase : bool = False , lowerCamelCase : Optional[int] = None , **lowerCamelCase : str , )-> str: super().__init__( lowerCamelCase , split=lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase , streaming=lowerCamelCase , num_proc=lowerCamelCase , **lowerCamelCase , ) snake_case__ : List[Any] = path_or_paths if isinstance(lowerCamelCase , lowerCamelCase ) else {self.split: path_or_paths} snake_case__ : Optional[int] = Text( cache_dir=lowerCamelCase , data_files=lowerCamelCase , features=lowerCamelCase , **lowerCamelCase , ) def __lowerCAmelCase ( self : List[str] )-> str: # Build iterable dataset if self.streaming: snake_case__ : Optional[int] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: snake_case__ : List[str] = None snake_case__ : Dict = None snake_case__ : Optional[Any] = None snake_case__ : List[Any] = None self.builder.download_and_prepare( download_config=lowerCamelCase , download_mode=lowerCamelCase , verification_mode=lowerCamelCase , base_path=lowerCamelCase , num_proc=self.num_proc , ) snake_case__ : Tuple = self.builder.as_dataset( split=self.split , verification_mode=lowerCamelCase , in_memory=self.keep_in_memory ) return dataset
172
0
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __SCREAMING_SNAKE_CASE (__A ): """simple docstring""" _a : List[Any] = ['''image_processor''', '''tokenizer'''] _a : List[Any] = '''ViTImageProcessor''' _a : int = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ): """simple docstring""" a_ = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , UpperCamelCase__ , ) a_ = kwargs.pop('feature_extractor' ) a_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(UpperCamelCase__ , UpperCamelCase__ ) def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ): """simple docstring""" if text is None and visual_prompt is None and images is None: raise ValueError('You have to specify either text, visual prompt or images.' ) if text is not None and visual_prompt is not None: raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' ) if text is not None: a_ = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) if visual_prompt is not None: a_ = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) if images is not None: a_ = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) if visual_prompt is not None and images is not None: a_ = { 'pixel_values': image_features.pixel_values, 'conditional_pixel_values': prompt_features.pixel_values, } return encoding elif text is not None and images is not None: a_ = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: a_ = { 'conditional_pixel_values': prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ ) def _a ( self , *UpperCamelCase__ , **UpperCamelCase__ ): """simple docstring""" return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ ) def _a ( self , *UpperCamelCase__ , **UpperCamelCase__ ): """simple docstring""" return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ ) @property def _a ( self ): """simple docstring""" warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCamelCase__ , ) return self.image_processor_class @property def _a ( self ): """simple docstring""" warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCamelCase__ , ) return self.image_processor
536
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() __lowerCAmelCase = logging.get_logger("transformers.models.speecht5") def __UpperCamelCase ( lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : Any ): """simple docstring""" hf_model.apply_weight_norm() a_ = checkpoint['input_conv.weight_g'] a_ = checkpoint['input_conv.weight_v'] a_ = checkpoint['input_conv.bias'] for i in range(len(config.upsample_rates ) ): a_ = checkpoint[F'upsamples.{i}.1.weight_g'] a_ = checkpoint[F'upsamples.{i}.1.weight_v'] a_ = checkpoint[F'upsamples.{i}.1.bias'] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): a_ = checkpoint[F'blocks.{i}.convs1.{j}.1.weight_g'] a_ = checkpoint[F'blocks.{i}.convs1.{j}.1.weight_v'] a_ = checkpoint[F'blocks.{i}.convs1.{j}.1.bias'] a_ = checkpoint[F'blocks.{i}.convs2.{j}.1.weight_g'] a_ = checkpoint[F'blocks.{i}.convs2.{j}.1.weight_v'] a_ = checkpoint[F'blocks.{i}.convs2.{j}.1.bias'] a_ = checkpoint['output_conv.1.weight_g'] a_ = checkpoint['output_conv.1.weight_v'] a_ = checkpoint['output_conv.1.bias'] hf_model.remove_weight_norm() @torch.no_grad() def __UpperCamelCase ( lowercase_ : List[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : List[str]=None , lowercase_ : Tuple=None , ): """simple docstring""" if config_path is not None: a_ = SpeechTaHifiGanConfig.from_pretrained(lowercase_ ) else: a_ = SpeechTaHifiGanConfig() a_ = SpeechTaHifiGan(lowercase_ ) a_ = torch.load(lowercase_ ) load_weights(orig_checkpoint['model']['generator'] , lowercase_ , lowercase_ ) a_ = np.load(lowercase_ ) a_ = stats[0].reshape(-1 ) a_ = stats[1].reshape(-1 ) a_ = torch.from_numpy(lowercase_ ).float() a_ = torch.from_numpy(lowercase_ ).float() model.save_pretrained(lowercase_ ) if repo_id: print('Pushing to the hub...' ) model.push_to_hub(lowercase_ ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) __lowerCAmelCase = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
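# --- Added illustration (not from the original script): the converter copies
# weight_g / weight_v pairs, i.e. PyTorch's weight-norm reparameterization
# w = g * v / ||v||, which is why apply_weight_norm() is called before copying
# and remove_weight_norm() afterwards. A tiny self-contained example:
from torch import nn

_conv = nn.utils.weight_norm(nn.Conv1d(4, 4, kernel_size=3))
print(_conv.weight_g.shape, _conv.weight_v.shape)  # the two tensors mapped per layer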
536
1
def solution(n: int = 100) -> int:
    # difference between the square of the sum and the sum of the squares of 1..n
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(F"""{solution() = }""")
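# Added sanity check: for n = 10 the square of the sum is 3025 and the sum of
# the squares is 385, so the difference is 2640.
assert solution(10) == 2640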
708
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError("""Each node should be type of TreeNode and data should be float.""")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("""inf"""), float("""inf"""))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
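# Added usage sketch: a valid BST and one whose children are swapped.
example_valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
example_invalid = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))
assert is_binary_search_tree(example_valid)
assert not is_binary_search_tree(example_invalid)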
164
0
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # each slot chains its entries in a deque, newest entry first
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        # keep chaining into this slot unless it is full and no slot is free
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
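# --- Added illustration of the chaining idea in isolation (independent of the
# HashTable base class, which is imported above but not shown here):
from collections import deque

_buckets = [None] * 4
_key = hash("alpha") % 4
_buckets[_key] = deque([]) if _buckets[_key] is None else _buckets[_key]
_buckets[_key].appendleft("alpha")  # newest entry sits at the left of the bucket
print(_buckets[_key])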
167
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all BART models at https://huggingface.co/models?filter=bart __lowercase = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, } __lowercase = { '''facebook/bart-base''': 1024, '''facebook/bart-large''': 1024, '''facebook/bart-large-mnli''': 1024, '''facebook/bart-large-cnn''': 1024, '''facebook/bart-large-xsum''': 1024, '''yjernite/bart_eli5''': 1024, } @lru_cache() def lowerCamelCase ( ): '''simple docstring''' __UpperCamelCase :List[str] = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) __UpperCamelCase :Tuple = bs[:] __UpperCamelCase :int = 0 for b in range(2**8 ): if b not in bs: bs.append(SCREAMING_SNAKE_CASE ) cs.append(2**8 + n ) n += 1 __UpperCamelCase :List[Any] = [chr(SCREAMING_SNAKE_CASE ) for n in cs] return dict(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :Tuple = set() __UpperCamelCase :List[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCamelCase :Optional[Any] = char return pairs class lowerCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' a__ : Dict = VOCAB_FILES_NAMES a__ : Any = PRETRAINED_VOCAB_FILES_MAP a__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : str = ["""input_ids""", """attention_mask"""] def __init__( self , __lowercase , __lowercase , __lowercase="replace" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase=False , **__lowercase , ) -> int: __UpperCamelCase :Dict = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase) if isinstance(__lowercase , __lowercase) else bos_token __UpperCamelCase :List[Any] = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase) if isinstance(__lowercase , __lowercase) else eos_token 
__UpperCamelCase :Tuple = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase) if isinstance(__lowercase , __lowercase) else sep_token __UpperCamelCase :List[Any] = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase) if isinstance(__lowercase , __lowercase) else cls_token __UpperCamelCase :Any = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase) if isinstance(__lowercase , __lowercase) else unk_token __UpperCamelCase :Dict = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase) if isinstance(__lowercase , __lowercase) else pad_token # Mask token behave like a normal word, i.e. include the space before it __UpperCamelCase :int = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase) if isinstance(__lowercase , __lowercase) else mask_token super().__init__( errors=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , add_prefix_space=__lowercase , **__lowercase , ) with open(__lowercase , encoding='''utf-8''') as vocab_handle: __UpperCamelCase :List[str] = json.load(__lowercase) __UpperCamelCase :List[Any] = {v: k for k, v in self.encoder.items()} __UpperCamelCase :Union[str, Any] = errors # how to handle errors in decoding __UpperCamelCase :Tuple = bytes_to_unicode() __UpperCamelCase :Tuple = {v: k for k, v in self.byte_encoder.items()} with open(__lowercase , encoding='''utf-8''') as merges_handle: __UpperCamelCase :Optional[int] = merges_handle.read().split('''\n''')[1:-1] __UpperCamelCase :Any = [tuple(merge.split()) for merge in bpe_merges] __UpperCamelCase :List[Any] = dict(zip(__lowercase , range(len(__lowercase)))) __UpperCamelCase :Union[str, Any] = {} __UpperCamelCase :str = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __UpperCamelCase :List[Any] = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''') @property def UpperCamelCase__ ( self) -> str: return len(self.encoder) def UpperCamelCase__ ( self) -> Dict: return dict(self.encoder , **self.added_tokens_encoder) def UpperCamelCase__ ( self , __lowercase) -> str: if token in self.cache: return self.cache[token] __UpperCamelCase :Tuple = tuple(__lowercase) __UpperCamelCase :Optional[int] = get_pairs(__lowercase) if not pairs: return token while True: __UpperCamelCase :Tuple = min(__lowercase , key=lambda __lowercase: self.bpe_ranks.get(__lowercase , float('''inf'''))) if bigram not in self.bpe_ranks: break __UpperCamelCase , __UpperCamelCase :Optional[int] = bigram __UpperCamelCase :Tuple = [] __UpperCamelCase :Dict = 0 while i < len(__lowercase): try: __UpperCamelCase :Dict = word.index(__lowercase , __lowercase) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) __UpperCamelCase :Union[str, Any] = j if word[i] == first and i < len(__lowercase) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 __UpperCamelCase :Tuple = tuple(__lowercase) __UpperCamelCase :List[Any] = new_word if len(__lowercase) == 1: break else: __UpperCamelCase :Optional[int] = get_pairs(__lowercase) __UpperCamelCase :Any = ''' '''.join(__lowercase) __UpperCamelCase :Dict = word return word def UpperCamelCase__ ( self , __lowercase) -> Any: __UpperCamelCase :Optional[Any] = [] for token in re.findall(self.pat , __lowercase): __UpperCamelCase :List[Any] = 
''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowercase).split(''' ''')) return bpe_tokens def UpperCamelCase__ ( self , __lowercase) -> Optional[int]: return self.encoder.get(__lowercase , self.encoder.get(self.unk_token)) def UpperCamelCase__ ( self , __lowercase) -> Optional[int]: return self.decoder.get(__lowercase) def UpperCamelCase__ ( self , __lowercase) -> List[Any]: __UpperCamelCase :List[str] = ''''''.join(__lowercase) __UpperCamelCase :Tuple = bytearray([self.byte_decoder[c] for c in text]).decode('''utf-8''' , errors=self.errors) return text def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]: if not os.path.isdir(__lowercase): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""") return __UpperCamelCase :Any = os.path.join( __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) __UpperCamelCase :Dict = os.path.join( __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file''']) with open(__lowercase , '''w''' , encoding='''utf-8''') as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowercase , ensure_ascii=__lowercase) + '''\n''') __UpperCamelCase :int = 0 with open(__lowercase , '''w''' , encoding='''utf-8''') as writer: writer.write('''#version: 0.2\n''') for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowercase: kv[1]): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''') __UpperCamelCase :Optional[int] = token_index writer.write(''' '''.join(__lowercase) + '''\n''') index += 1 return vocab_file, merge_file def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __UpperCamelCase :Optional[Any] = [self.cls_token_id] __UpperCamelCase :str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase) if token_ids_a is None: return [1] + ([0] * len(__lowercase)) + [1] return [1] + ([0] * len(__lowercase)) + [1, 1] + ([0] * len(__lowercase)) + [1] def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> List[int]: __UpperCamelCase :Optional[int] = [self.sep_token_id] __UpperCamelCase :Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def UpperCamelCase__ ( self , __lowercase , __lowercase=False , **__lowercase) -> Any: __UpperCamelCase :List[Any] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(__lowercase) > 0 and not text[0].isspace()): __UpperCamelCase :Optional[Any] = ''' ''' + text return (text, kwargs)
167
1
"""simple docstring""" import requests A__ : Optional[int] = """YOUR API KEY""" def a__ ( lowerCAmelCase : str , lowerCAmelCase : str = giphy_api_key ): '''simple docstring''' UpperCAmelCase__ : str = "+".join(query.split() ) UpperCAmelCase__ : Dict = F"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}" UpperCAmelCase__ : List[Any] = requests.get(lowerCAmelCase ).json()["data"] return [gif["url"] for gif in gifs] if __name__ == "__main__": print("""\n""".join(get_gifs("""space ship""")))
700
"""simple docstring""" from timeit import timeit def a__ ( lowerCAmelCase : int ): '''simple docstring''' if number < 0: raise ValueError("the value of input must not be negative" ) UpperCAmelCase__ : Tuple = 0 while number: number &= number - 1 result += 1 return result def a__ ( lowerCAmelCase : int ): '''simple docstring''' if number < 0: raise ValueError("the value of input must not be negative" ) UpperCAmelCase__ : Union[str, Any] = 0 while number: if number % 2 == 1: result += 1 number >>= 1 return result def a__ ( ): '''simple docstring''' def do_benchmark(lowerCAmelCase : int ) -> None: UpperCAmelCase__ : Dict = "import __main__ as z" print(F"Benchmark when {number = }:" ) print(F"{get_set_bits_count_using_modulo_operator(lowerCAmelCase ) = }" ) UpperCAmelCase__ : Tuple = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=lowerCAmelCase ) print(F"timeit() runs in {timing} seconds" ) print(F"{get_set_bits_count_using_brian_kernighans_algorithm(lowerCAmelCase ) = }" ) UpperCAmelCase__ : Any = timeit( "z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=lowerCAmelCase , ) print(F"timeit() runs in {timing} seconds" ) for number in (25, 37, 58, 0): do_benchmark(lowerCAmelCase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
660
0
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError('Length must be a positive integer.')
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
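# Added sanity check: under this 0-based definition h(n) = n(2n - 1), the first
# five values are 0, 1, 6, 15, 28.
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]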
239
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    # XOR-decrypt with the candidate key; bail out on the first invalid character
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    data = Path(__file__).parent.joinpath(filename).read_text(encoding='utf-8')
    ciphertext = [int(number) for number in data.strip().split(',')]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(F"""{solution() = }""")
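# --- Added sketch of the XOR round-trip property the solver above relies on,
# with a made-up three-letter key (the real key is searched over a-z cubed):
from itertools import cycle

_key = b"abc"
_plain = b"the quick brown fox"
_cipher = bytes(p ^ k for p, k in zip(_plain, cycle(_key)))
assert bytes(c ^ k for c, k in zip(_cipher, cycle(_key))) == _plain  # XOR is its own inverse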
239
1
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter

from transformers import HfArgumentParser


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False


class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

                title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
717
def climb_stairs(number_of_steps: int) -> int:
    # Number of distinct ways to climb the stairs taking 1 or 2 steps at a time
    # (iterative Fibonacci).
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
551
0
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
685
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer __A =logging.getLogger(__name__) def lowerCamelCase_ ( ): lowerCamelCase_ = argparse.ArgumentParser( description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." ) parser.add_argument( "--dataset_name" , type=lowerCamelCase__ , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , ) parser.add_argument( "--dataset_config" , type=lowerCamelCase__ , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." ) parser.add_argument( "--tokenizer_name_or_path" , type=lowerCamelCase__ , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , ) parser.add_argument( "--shard_size" , type=lowerCamelCase__ , default=1_0_0_0 , help="Number of entries to go in a single shard." , ) parser.add_argument("--split" , type=lowerCamelCase__ , default="train" , choices=["train", "test", "validation"] ) parser.add_argument( "--limit" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="Limit the number of shards (used for debugging)." , ) parser.add_argument( "--max_length" , type=lowerCamelCase__ , default=5_1_2 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum" " sequence length that is a multiple of 8." , ) parser.add_argument( "--output_dir" , default="tf-tpu" , type=lowerCamelCase__ , help="Output directory where the TFRecord shards will be saved. If the" " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord" " shards will be directly saved to a Google Cloud Storage bucket." , ) lowerCamelCase_ = parser.parse_args() return args def lowerCamelCase_ ( lowerCamelCase__ ): def fn(lowerCamelCase__ ): return tokenizer(examples["text"] ) return fn def lowerCamelCase_ ( lowerCamelCase__ ): lowerCamelCase_ = [] for i in range(len(tokenized_data["input_ids"] ) ): lowerCamelCase_ = { "input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ), "attention_mask": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ), } lowerCamelCase_ = tf.train.Features(feature=lowerCamelCase__ ) lowerCamelCase_ = tf.train.Example(features=lowerCamelCase__ ) lowerCamelCase_ = example.SerializeToString() records.append(lowerCamelCase__ ) return records def lowerCamelCase_ ( lowerCamelCase__ ): lowerCamelCase_ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: lowerCamelCase_ = min(len(lowerCamelCase__ ) , args.limit ) lowerCamelCase_ = dataset.select(range(lowerCamelCase__ ) ) print(F'Limiting the dataset to {args.limit} entries.' ) lowerCamelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) lowerCamelCase_ = os.path.join(args.output_dir , args.split ) if not os.path.exists(lowerCamelCase__ ): os.makedirs(lowerCamelCase__ ) else: lowerCamelCase_ = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. 
lowerCamelCase_ = tokenize_function(lowerCamelCase__ ) lowerCamelCase_ = dataset.map(lowerCamelCase__ , batched=lowerCamelCase__ , num_proc=4 , remove_columns=["text"] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(lowerCamelCase__ ): # Concatenate all texts. lowerCamelCase_ = {k: sum(examples[k] , [] ) for k in examples.keys()} lowerCamelCase_ = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 lowerCamelCase_ = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. lowerCamelCase_ = { k: [t[i : i + args.max_length] for i in range(0 , lowerCamelCase__ , args.max_length )] for k, t in concatenated_examples.items() } return result lowerCamelCase_ = dataset_tokenized.map(lowerCamelCase__ , batched=lowerCamelCase__ , batch_size=1_0_0_0 , num_proc=4 ) lowerCamelCase_ = 0 lowerCamelCase_ = 0 for shard in range(0 , len(lowerCamelCase__ ) , args.shard_size ): lowerCamelCase_ = grouped_dataset[shard : shard + args.shard_size] lowerCamelCase_ = len(dataset_snapshot["input_ids"] ) lowerCamelCase_ = os.path.join(lowerCamelCase__ , F'dataset-{shard_count}-{records_containing}.tfrecord' ) lowerCamelCase_ = get_serialized_examples(lowerCamelCase__ ) with tf.io.TFRecordWriter(lowerCamelCase__ ) as out_file: for i in range(len(lowerCamelCase__ ) ): lowerCamelCase_ = serialized_examples[i] out_file.write(lowerCamelCase__ ) print("Wrote file {} containing {} records".format(lowerCamelCase__ , lowerCamelCase__ ) ) shard_count += 1 total_records += records_containing with open(F'split-{args.split}-records-count.txt' , "w" ) as f: print(F'Total {args.split} records: {total_records}' , file=lowerCamelCase__ ) if __name__ == "__main__": __A =parse_args() main(args)
463
0
from math import sqrt def a ( snake_case__: int ): '''simple docstring''' assert isinstance(_lowercase , _lowercase ) and ( number >= 0 ), "'number' must been an int and positive" lowercase_ = True # 0 and 1 are none primes. if number <= 1: lowercase_ = False for divisor in range(2 , int(round(sqrt(_lowercase ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: lowercase_ = False break # precondition assert isinstance(_lowercase , _lowercase ), "'status' must been from type bool" return status def a ( snake_case__: Optional[Any] ): '''simple docstring''' assert isinstance(_lowercase , _lowercase ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N lowercase_ = list(range(2 , n + 1 ) ) lowercase_ = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(_lowercase ) ): for j in range(i + 1 , len(_lowercase ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): lowercase_ = 0 # filters actual prime numbers. lowercase_ = [x for x in begin_list if x != 0] # precondition assert isinstance(_lowercase , _lowercase ), "'ans' must been from type list" return ans def a ( snake_case__: Union[str, Any] ): '''simple docstring''' assert isinstance(_lowercase , _lowercase ) and (n > 2), "'N' must been an int and > 2" lowercase_ = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(_lowercase ): ans.append(_lowercase ) # precondition assert isinstance(_lowercase , _lowercase ), "'ans' must been from type list" return ans def a ( snake_case__: int ): '''simple docstring''' assert isinstance(_lowercase , _lowercase ) and number >= 0, "'number' must been an int and >= 0" lowercase_ = [] # this list will be returns of the function. # potential prime number factors. 
lowercase_ = 2 lowercase_ = number if number == 0 or number == 1: ans.append(_lowercase ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(_lowercase ): while quotient != 1: if is_prime(_lowercase ) and (quotient % factor == 0): ans.append(_lowercase ) quotient /= factor else: factor += 1 else: ans.append(_lowercase ) # precondition assert isinstance(_lowercase , _lowercase ), "'ans' must been from type list" return ans def a ( snake_case__: Any ): '''simple docstring''' assert isinstance(_lowercase , _lowercase ) and ( number >= 0 ), "'number' bust been an int and >= 0" lowercase_ = 0 # prime factorization of 'number' lowercase_ = prime_factorization(_lowercase ) lowercase_ = max(_lowercase ) # precondition assert isinstance(_lowercase , _lowercase ), "'ans' must been from type int" return ans def a ( snake_case__: int ): '''simple docstring''' assert isinstance(_lowercase , _lowercase ) and ( number >= 0 ), "'number' bust been an int and >= 0" lowercase_ = 0 # prime factorization of 'number' lowercase_ = prime_factorization(_lowercase ) lowercase_ = min(_lowercase ) # precondition assert isinstance(_lowercase , _lowercase ), "'ans' must been from type int" return ans def a ( snake_case__: Tuple ): '''simple docstring''' assert isinstance(_lowercase , _lowercase ), "'number' must been an int" assert isinstance(number % 2 == 0 , _lowercase ), "compare bust been from type bool" return number % 2 == 0 def a ( snake_case__: Optional[Any] ): '''simple docstring''' assert isinstance(_lowercase , _lowercase ), "'number' must been an int" assert isinstance(number % 2 != 0 , _lowercase ), "compare bust been from type bool" return number % 2 != 0 def a ( snake_case__: Any ): '''simple docstring''' assert ( isinstance(_lowercase , _lowercase ) and (number > 2) and is_even(_lowercase ) ), "'number' must been an int, even and > 2" lowercase_ = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' lowercase_ = get_prime_numbers(_lowercase ) lowercase_ = len(_lowercase ) # run variable for while-loops. lowercase_ = 0 lowercase_ = None # exit variable. for break up the loops lowercase_ = True while i < len_pn and loop: lowercase_ = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: lowercase_ = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(_lowercase , _lowercase ) and (len(_lowercase ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def a ( snake_case__: Optional[Any] , snake_case__: Tuple ): '''simple docstring''' assert ( isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." lowercase_ = 0 while numbera != 0: lowercase_ = numbera % numbera lowercase_ = numbera lowercase_ = rest # precondition assert isinstance(_lowercase , _lowercase ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def a ( snake_case__: Optional[int] , snake_case__: Optional[int] ): '''simple docstring''' assert ( isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." lowercase_ = 1 # actual answer that will be return. 
# for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' lowercase_ = prime_factorization(_lowercase ) lowercase_ = prime_factorization(_lowercase ) elif numbera == 1 or numbera == 1: lowercase_ = [] lowercase_ = [] lowercase_ = max(_lowercase , _lowercase ) lowercase_ = 0 lowercase_ = 0 lowercase_ = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: lowercase_ = prime_fac_a.count(_lowercase ) lowercase_ = prime_fac_a.count(_lowercase ) for _ in range(max(_lowercase , _lowercase ) ): ans *= n else: lowercase_ = prime_fac_a.count(_lowercase ) for _ in range(_lowercase ): ans *= n done.append(_lowercase ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: lowercase_ = prime_fac_a.count(_lowercase ) for _ in range(_lowercase ): ans *= n done.append(_lowercase ) # precondition assert isinstance(_lowercase , _lowercase ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def a ( snake_case__: Dict ): '''simple docstring''' assert isinstance(_lowercase , _lowercase ) and (n >= 0), "'number' must been a positive int" lowercase_ = 0 lowercase_ = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(_lowercase ): ans += 1 # precondition assert isinstance(_lowercase , _lowercase ) and is_prime( _lowercase ), "'ans' must been a prime number and from type int" return ans def a ( snake_case__: List[str] , snake_case__: Tuple ): '''simple docstring''' assert ( is_prime(_lowercase ) and is_prime(_lowercase ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" lowercase_ = p_number_a + 1 # jump to the next number lowercase_ = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(_lowercase ): number += 1 while number < p_number_a: ans.append(_lowercase ) number += 1 # fetch the next prime number. while not is_prime(_lowercase ): number += 1 # precondition assert ( isinstance(_lowercase , _lowercase ) and ans[0] != p_number_a and ans[len(_lowercase ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def a ( snake_case__: Tuple ): '''simple docstring''' assert isinstance(_lowercase , _lowercase ) and (n >= 1), "'n' must been int and >= 1" lowercase_ = [] # will be returned. 
for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(_lowercase ) # precondition assert ans[0] == 1 and ans[len(_lowercase ) - 1] == n, "Error in function getDivisiors(...)" return ans def a ( snake_case__: Tuple ): '''simple docstring''' assert isinstance(_lowercase , _lowercase ) and ( number > 1 ), "'number' must been an int and >= 1" lowercase_ = get_divisors(_lowercase ) # precondition assert ( isinstance(_lowercase , _lowercase ) and (divisors[0] == 1) and (divisors[len(_lowercase ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def a ( snake_case__: Union[str, Any] , snake_case__: Union[str, Any] ): '''simple docstring''' assert ( isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. lowercase_ = gcd(abs(_lowercase ) , abs(_lowercase ) ) # precondition assert ( isinstance(_lowercase , _lowercase ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def a ( snake_case__: List[str] ): '''simple docstring''' assert isinstance(_lowercase , _lowercase ) and (n >= 0), "'n' must been a int and >= 0" lowercase_ = 1 # this will be return. for factor in range(1 , n + 1 ): ans *= factor return ans def a ( snake_case__: str ): '''simple docstring''' assert isinstance(_lowercase , _lowercase ) and (n >= 0), "'n' must been an int and >= 0" lowercase_ = 0 lowercase_ = 1 lowercase_ = 1 # this will be return for _ in range(n - 1 ): lowercase_ = ans ans += fiba lowercase_ = tmp return ans
718
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
409
0
"""simple docstring""" from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging _A = logging.get_logger(__name__) class _lowerCamelCase ( a_ ): _lowerCamelCase :List[Any] = ["input_features", "attention_mask"] def __init__( self : Optional[int] , UpperCamelCase : Any=80 , UpperCamelCase : Dict=1_60_00 , UpperCamelCase : List[Any]=80 , UpperCamelCase : Union[str, Any]=0.0 , UpperCamelCase : Dict=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : Tuple=True , **UpperCamelCase : Optional[Any] , ) -> List[str]: """simple docstring""" super().__init__(feature_size=UpperCamelCase , sampling_rate=UpperCamelCase , padding_value=UpperCamelCase , **UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = num_mel_bins lowerCAmelCase__ : Optional[int] = do_ceptral_normalize lowerCAmelCase__ : int = normalize_means lowerCAmelCase__ : Dict = normalize_vars lowerCAmelCase__ : str = True def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : np.ndarray , ) -> np.ndarray: """simple docstring""" lowerCAmelCase__ : List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers lowerCAmelCase__ : Union[str, Any] = torch.from_numpy(UpperCamelCase ).unsqueeze(0 ) lowerCAmelCase__ : Dict = ta_kaldi.fbank(UpperCamelCase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _lowerCAmelCase ( UpperCamelCase : np.ndarray , UpperCamelCase : int , UpperCamelCase : Optional[bool] = True , UpperCamelCase : Optional[bool] = True , UpperCamelCase : float = 0.0 , ) -> np.ndarray: """simple docstring""" # make sure we normalize float32 arrays if normalize_means: lowerCAmelCase__ : Tuple = x[:input_length].mean(axis=0 ) lowerCAmelCase__ : Tuple = np.subtract(UpperCamelCase , UpperCamelCase ) if normalize_vars: lowerCAmelCase__ : Any = x[:input_length].std(axis=0 ) lowerCAmelCase__ : str = np.divide(UpperCamelCase , UpperCamelCase ) if input_length < x.shape[0]: lowerCAmelCase__ : List[str] = padding_value # make sure array is in float32 lowerCAmelCase__ : int = x.astype(np.floataa ) return x def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : List[np.ndarray] , UpperCamelCase : Optional[np.ndarray] = None ) -> List[np.ndarray]: """simple docstring""" lowerCAmelCase__ : Optional[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(UpperCamelCase , UpperCamelCase , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(UpperCamelCase , UpperCamelCase ) ] def __call__( self : str , UpperCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase : Union[bool, str, PaddingStrategy] = False , UpperCamelCase : Optional[int] = None , UpperCamelCase : bool = False , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[bool] = None , **UpperCamelCase : List[str] , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" f""" {self.sampling_rate}. 
Please make sure that the provided `raw_speech` input was sampled with""" f""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) lowerCAmelCase__ : List[Any] = isinstance(UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) lowerCAmelCase__ : Union[str, Any] = is_batched_numpy or ( isinstance(UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCAmelCase__ : Any = [np.asarray(UpperCamelCase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase , np.ndarray ): lowerCAmelCase__ : List[Any] = np.asarray(UpperCamelCase , dtype=np.floataa ) elif isinstance(UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCAmelCase__ : List[str] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCAmelCase__ : Union[str, Any] = [raw_speech] # extract fbank features lowerCAmelCase__ : Dict = [self._extract_fbank_features(UpperCamelCase ) for waveform in raw_speech] # convert into correct format for padding lowerCAmelCase__ : int = BatchFeature({"""input_features""": features} ) lowerCAmelCase__ : Union[str, Any] = self.pad( UpperCamelCase , padding=UpperCamelCase , max_length=UpperCamelCase , truncation=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_attention_mask=UpperCamelCase , **UpperCamelCase , ) # make sure list is in array format lowerCAmelCase__ : List[Any] = padded_inputs.get("""input_features""" ) if isinstance(input_features[0] , UpperCamelCase ): lowerCAmelCase__ : Tuple = [np.asarray(UpperCamelCase , dtype=np.floataa ) for feature in input_features] lowerCAmelCase__ : List[str] = padded_inputs.get("""attention_mask""" ) if attention_mask is not None: lowerCAmelCase__ : List[Any] = [np.asarray(UpperCamelCase , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: lowerCAmelCase__ : Optional[Any] = ( np.array(UpperCamelCase , dtype=np.intaa ) if self._get_padding_strategies(UpperCamelCase , max_length=UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) lowerCAmelCase__ : int = self.normalize( padded_inputs["""input_features"""] , attention_mask=UpperCamelCase ) if return_tensors is not None: lowerCAmelCase__ : Tuple = padded_inputs.convert_to_tensors(UpperCamelCase ) return padded_inputs
299
"""simple docstring""" import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class _lowerCamelCase ( nn.Module ): def __init__( self : Optional[Any] ) -> Optional[int]: """simple docstring""" super().__init__() lowerCAmelCase__ : Optional[int] = nn.Linear(3 , 4 ) lowerCAmelCase__ : int = nn.BatchNormad(4 ) lowerCAmelCase__ : Optional[Any] = nn.Linear(4 , 5 ) def _lowerCAmelCase ( self : List[str] , UpperCamelCase : List[Any] ) -> Tuple: """simple docstring""" return self.lineara(self.batchnorm(self.lineara(UpperCamelCase ) ) ) class _lowerCamelCase ( unittest.TestCase ): def _lowerCAmelCase ( self : Any ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ : Any = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase , model.state_dict() ) lowerCAmelCase__ : List[str] = os.path.join(UpperCamelCase , """index.json""" ) self.assertTrue(os.path.isfile(UpperCamelCase ) ) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: lowerCAmelCase__ : Tuple = os.path.join(UpperCamelCase , f"""{key}.dat""" ) self.assertTrue(os.path.isfile(UpperCamelCase ) ) # TODO: add tests on the fact weights are properly loaded def _lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" lowerCAmelCase__ : List[str] = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: lowerCAmelCase__ : Union[str, Any] = torch.randn(2 , 3 , dtype=UpperCamelCase ) with TemporaryDirectory() as tmp_dir: lowerCAmelCase__ : Optional[Any] = offload_weight(UpperCamelCase , """weight""" , UpperCamelCase , {} ) lowerCAmelCase__ : Dict = os.path.join(UpperCamelCase , """weight.dat""" ) self.assertTrue(os.path.isfile(UpperCamelCase ) ) self.assertDictEqual(UpperCamelCase , {"""weight""": {"""shape""": [2, 3], """dtype""": str(UpperCamelCase ).split(""".""" )[1]}} ) lowerCAmelCase__ : Any = load_offloaded_weight(UpperCamelCase , index["""weight"""] ) self.assertTrue(torch.equal(UpperCamelCase , UpperCamelCase ) ) def _lowerCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" lowerCAmelCase__ : Union[str, Any] = ModelForTest() lowerCAmelCase__ : Optional[Any] = model.state_dict() lowerCAmelCase__ : Tuple = {k: v for k, v in state_dict.items() if """linear2""" not in k} lowerCAmelCase__ : Any = {k: v for k, v in state_dict.items() if """linear2""" in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : str = OffloadedWeightsLoader(state_dict=UpperCamelCase , save_folder=UpperCamelCase ) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase , weight_map[key] ) ) lowerCAmelCase__ : str = {k: v for k, v in state_dict.items() if """weight""" in k} lowerCAmelCase__ : str = {k: v for k, v in state_dict.items() if """weight""" not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Any = OffloadedWeightsLoader(state_dict=UpperCamelCase , save_folder=UpperCamelCase ) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): 
self.assertTrue(torch.allclose(UpperCamelCase , weight_map[key] ) ) with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase , UpperCamelCase ) # Duplicates are removed lowerCAmelCase__ : List[str] = OffloadedWeightsLoader(state_dict=UpperCamelCase , save_folder=UpperCamelCase ) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase , weight_map[key] ) ) def _lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowerCAmelCase__ : List[str] = {"""a.1""": 0, """a.10""": 1, """a.2""": 2} lowerCAmelCase__ : Any = extract_submodules_state_dict(UpperCamelCase , ["""a.1""", """a.2"""] ) self.assertDictEqual(UpperCamelCase , {"""a.1""": 0, """a.2""": 2} ) lowerCAmelCase__ : str = {"""a.1.a""": 0, """a.10.a""": 1, """a.2.a""": 2} lowerCAmelCase__ : Union[str, Any] = extract_submodules_state_dict(UpperCamelCase , ["""a.1""", """a.2"""] ) self.assertDictEqual(UpperCamelCase , {"""a.1.a""": 0, """a.2.a""": 2} )
299
1
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjancency = defaultdict(list)
    for node1, node2, cost in edges:
        adjancency[node1].append([node2, cost])
        adjancency[node2].append([node1, cost])

    result = mst(adjancency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    # Each expected edge must appear in the MST in either orientation.
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
611
def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
611
1
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
33
from copy import deepcopy


class FenwickTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        # Build the tree in O(n) by pushing each node's value to its parent.
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        # Invert the construction to recover the original array.
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        # Largest index i such that prefix(i) <= value, via binary lifting.
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
33
1
import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase : Optional[Any] = logging.get_logger(__name__) lowercase : Union[str, Any] = { """vocab_file""": """vocab.json""", """tokenizer_config_file""": """tokenizer_config.json""", """merges_file""": """merges.txt""", } lowercase : Optional[int] = { """vocab_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json""" ), }, """tokenizer_config_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json""" ), }, """merges_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt""" ), }, } lowercase : List[Any] = """</w>""" lowercase : str = """@@ """ def UpperCAmelCase_ ( _UpperCAmelCase ) -> Any: lowerCamelCase_: List[Any] = set() lowerCamelCase_: Union[str, Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase_: Any = char return pairs # Speech2Text2 has no max input length lowercase : List[Any] = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4} class a__ ( __SCREAMING_SNAKE_CASE ): _A = VOCAB_FILES_NAMES _A = PRETRAINED_VOCAB_FILES_MAP _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , A_ : Optional[Any] , A_ : Any="<s>" , A_ : Union[str, Any]="<pad>" , A_ : Optional[int]="</s>" , A_ : Tuple="<unk>" , A_ : List[Any]=False , A_ : Dict=None , **A_ : List[Any] , ) -> Any: """simple docstring""" super().__init__( unk_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , do_lower_case=A_ , **A_ , ) lowerCamelCase_: int = do_lower_case with open(A_ , encoding="""utf-8""" ) as vocab_handle: lowerCamelCase_: Union[str, Any] = json.load(A_ ) lowerCamelCase_: Any = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(f"""No merges files provided. 
{self.__class__.__name__} can only be used for decoding.""" ) lowerCamelCase_: int = None lowerCamelCase_: Union[str, Any] = None else: with open(A_ , encoding="""utf-8""" ) as merges_handle: lowerCamelCase_: Optional[Any] = merges_handle.read().split("""\n""" )[:-1] lowerCamelCase_: List[Any] = [tuple(merge.split()[:2] ) for merge in merges] lowerCamelCase_: Union[str, Any] = dict(zip(A_ , range(len(A_ ) ) ) ) lowerCamelCase_: int = {} @property def lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" return len(self.decoder ) def lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self : Tuple , A_ : List[str] ) -> Tuple: """simple docstring""" lowerCamelCase_: Optional[Any] = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] lowerCamelCase_: List[str] = get_pairs(A_ ) if not pairs: return token while True: lowerCamelCase_: Optional[int] = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase_: Union[str, Any] = bigram lowerCamelCase_: Optional[Any] = [] lowerCamelCase_: Any = 0 while i < len(A_ ): try: lowerCamelCase_: Optional[Any] = word.index(A_ , A_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase_: Tuple = j if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase_: Dict = tuple(A_ ) lowerCamelCase_: Union[str, Any] = new_word if len(A_ ) == 1: break else: lowerCamelCase_: List[Any] = get_pairs(A_ ) lowerCamelCase_: Optional[Any] = """ """.join(A_ ) if word == "\n " + BPE_TOKEN_MERGES: lowerCamelCase_: int = """\n""" + BPE_TOKEN_MERGES if word.endswith(A_ ): lowerCamelCase_: str = word.replace(A_ , """""" ) lowerCamelCase_: Optional[Any] = word.replace(""" """ , A_ ) lowerCamelCase_: Optional[Any] = word return word def lowerCAmelCase ( self : int , A_ : Union[str, Any] ) -> str: """simple docstring""" if self.bpe_ranks is None: raise ValueError( """This tokenizer was instantiated without a `merges.txt` file, so""" """ that it can only be used for decoding, not for encoding.""" """Make sure to provide `merges.txt` file at instantiation to enable """ """encoding.""" ) if self.do_lower_case: lowerCamelCase_: Optional[int] = text.lower() lowerCamelCase_: Dict = text.split() lowerCamelCase_: Any = [] for token in text: if token: split_tokens.extend(list(self.bpe(A_ ).split(""" """ ) ) ) return split_tokens def lowerCAmelCase ( self : List[str] , A_ : str ) -> int: """simple docstring""" return self.encoder.get(A_ , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self : Dict , A_ : int ) -> str: """simple docstring""" lowerCamelCase_: int = self.decoder.get(A_ , self.unk_token ) return result def lowerCAmelCase ( self : List[Any] , A_ : List[str] ) -> str: """simple docstring""" lowerCamelCase_: str = """ """.join(A_ ) # make sure @@ tokens are concatenated lowerCamelCase_: Dict = """""".join(string.split(A_ ) ) return string def lowerCAmelCase ( self : Tuple , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(A_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCamelCase_: List[str] = os.path.join( A_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) 
lowerCamelCase_: str = os.path.join( A_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(A_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + """\n""" ) lowerCamelCase_: Any = 0 if self.bpe_ranks is None: return (vocab_file,) with open(A_ , """w""" , encoding="""utf-8""" ) as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""" ) lowerCamelCase_: str = token_index writer.write(""" """.join(A_ ) + """\n""" ) index += 1 return (vocab_file, merges_file)
703
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
584
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_owlvit import OwlViTImageProcessor _lowercase = logging.get_logger(__name__) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Union[str, Any] ,*A_ : int ,**A_ : Union[str, Any] ) -> None: warnings.warn( 'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use OwlViTImageProcessor instead.' ,A_ ,) super().__init__(*A_ ,**A_ )
91
"""simple docstring""" from __future__ import annotations def __UpperCamelCase ( snake_case__ , snake_case__ = None , snake_case__ = None ): if start is None: A_ : Dict = 0 if end is None: A_ : Dict = len(snake_case__ ) - 1 if start >= end: return A_ : List[Any] = (start + end) // 2 slowsort(snake_case__ , snake_case__ , snake_case__ ) slowsort(snake_case__ , mid + 1 , snake_case__ ) if sequence[end] < sequence[mid]: A_ , A_ : Dict = sequence[mid], sequence[end] slowsort(snake_case__ , snake_case__ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
180
0
"""simple docstring""" import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class _lowerCamelCase (tf.keras.optimizers.schedules.LearningRateSchedule ): def __init__( self : List[Any] , lowerCamelCase_ : float , lowerCamelCase_ : Callable , lowerCamelCase_ : int , lowerCamelCase_ : float = 1.0 , lowerCamelCase_ : str = None , ): """simple docstring""" super().__init__() _lowercase : Dict = initial_learning_rate _lowercase : List[Any] = warmup_steps _lowercase : Tuple = power _lowercase : int = decay_schedule_fn _lowercase : Any = name def __call__( self : List[str] , lowerCamelCase_ : int ): """simple docstring""" with tf.name_scope(self.name or 'WarmUp' ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. _lowercase : Optional[Any] = tf.cast(lowerCamelCase_ , tf.floataa ) _lowercase : Optional[Any] = tf.cast(self.warmup_steps , tf.floataa ) _lowercase : List[str] = global_step_float / warmup_steps_float _lowercase : Dict = self.initial_learning_rate * tf.math.pow(lowerCamelCase_ , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowerCamelCase_ , ) def __UpperCAmelCase ( self : Dict ): """simple docstring""" return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def __lowerCAmelCase( __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = 0.0 ,__UpperCAmelCase = 0.9 ,__UpperCAmelCase = 0.9_9_9 ,__UpperCAmelCase = 1E-8 ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = 0.0 ,__UpperCAmelCase = 1.0 ,__UpperCAmelCase = None ,): """simple docstring""" _lowercase : str = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=__UpperCAmelCase ,decay_steps=num_train_steps - num_warmup_steps ,end_learning_rate=init_lr * min_lr_ratio ,power=__UpperCAmelCase ,) if num_warmup_steps: _lowercase : List[str] = WarmUp( initial_learning_rate=__UpperCAmelCase ,decay_schedule_fn=__UpperCAmelCase ,warmup_steps=__UpperCAmelCase ,) if weight_decay_rate > 0.0: _lowercase : Union[str, Any] = AdamWeightDecay( learning_rate=__UpperCAmelCase ,weight_decay_rate=__UpperCAmelCase ,beta_a=__UpperCAmelCase ,beta_a=__UpperCAmelCase ,epsilon=__UpperCAmelCase ,clipnorm=__UpperCAmelCase ,global_clipnorm=__UpperCAmelCase ,exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] ,include_in_weight_decay=__UpperCAmelCase ,) else: _lowercase : Optional[int] = tf.keras.optimizers.Adam( learning_rate=__UpperCAmelCase ,beta_a=__UpperCAmelCase ,beta_a=__UpperCAmelCase ,epsilon=__UpperCAmelCase ,clipnorm=__UpperCAmelCase ,global_clipnorm=__UpperCAmelCase ,) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class _lowerCamelCase (__lowerCamelCase ): def __init__( self : Dict , lowerCamelCase_ : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , lowerCamelCase_ : float = 0.9 , lowerCamelCase_ : float = 0.999 , lowerCamelCase_ : float = 1E-7 , lowerCamelCase_ : bool = False , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : Optional[List[str]] = None , lowerCamelCase_ : Optional[List[str]] = None , lowerCamelCase_ : str = "AdamWeightDecay" , **lowerCamelCase_ : int , ): """simple docstring""" super().__init__(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) _lowercase : Union[str, Any] = weight_decay_rate _lowercase : str = include_in_weight_decay _lowercase : Dict = exclude_from_weight_decay @classmethod def __UpperCAmelCase ( cls : Union[str, Any] , lowerCamelCase_ : str ): """simple docstring""" _lowercase : Tuple = {'WarmUp': WarmUp} return super(lowerCamelCase_ , cls ).from_config(lowerCamelCase_ , custom_objects=lowerCamelCase_ ) def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict ): """simple docstring""" super(lowerCamelCase_ , self )._prepare_local(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) _lowercase : Union[str, Any] = tf.constant( self.weight_decay_rate , name='adam_weight_decay_rate' ) def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple ): """simple docstring""" _lowercase : Any = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , ) return tf.no_op() def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Any=None , **lowerCamelCase_ : Dict ): """simple docstring""" _lowercase : int = list(zip(*lowerCamelCase_ ) ) return super(lowerCamelCase_ , self ).apply_gradients(zip(lowerCamelCase_ , lowerCamelCase_ ) , name=lowerCamelCase_ , **lowerCamelCase_ ) def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : str ): """simple docstring""" if apply_state is None: return self._decayed_lr_t[var_dtype], {} _lowercase : int = apply_state or {} _lowercase : List[Any] = apply_state.get((var_device, var_dtype) ) if coefficients is None: _lowercase : Any = self._fallback_apply_state(lowerCamelCase_ , lowerCamelCase_ ) _lowercase : Union[str, Any] = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=None ): """simple docstring""" _lowercase : List[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowerCamelCase_ ) _lowercase : Union[str, Any] = self._decay_weights_op(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) with tf.control_dependencies([decay] ): return super(lowerCamelCase_ , self )._resource_apply_dense(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def __UpperCAmelCase ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any]=None ): """simple docstring""" _lowercase : Any = self._get_lr(var.device , var.dtype.base_dtype , lowerCamelCase_ ) _lowercase : List[Any] = self._decay_weights_op(lowerCamelCase_ , lowerCamelCase_ , 
lowerCamelCase_ ) with tf.control_dependencies([decay] ): return super(lowerCamelCase_ , self )._resource_apply_sparse(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def __UpperCAmelCase ( self : Dict ): """simple docstring""" _lowercase : List[Any] = super().get_config() config.update({'weight_decay_rate': self.weight_decay_rate} ) return config def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Optional[Any] ): """simple docstring""" if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(lowerCamelCase_ , lowerCamelCase_ ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(lowerCamelCase_ , lowerCamelCase_ ) is not None: return False return True class _lowerCamelCase (__lowerCamelCase ): def __init__( self : Union[str, Any] ): """simple docstring""" _lowercase : Any = [] _lowercase : List[Any] = None @property def __UpperCAmelCase ( self : int ): """simple docstring""" if self._accum_steps is None: _lowercase : List[str] = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=lowerCamelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def __UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" if not self._gradients: raise ValueError('The accumulator should be called first to initialize the gradients' ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self : List[Any] , lowerCamelCase_ : int ): """simple docstring""" if not self._gradients: _lowercase : Optional[int] = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(lowerCamelCase_ ) , trainable=lowerCamelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(lowerCamelCase_ ) != len(self._gradients ): raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(lowerCamelCase_ )}''' ) for accum_gradient, gradient in zip(self._gradients , lowerCamelCase_ ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(lowerCamelCase_ ) self._accum_steps.assign_add(1 ) def __UpperCAmelCase ( self : List[Any] ): """simple docstring""" if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(lowerCamelCase_ ) )
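The two classes above implement decoupled weight decay on top of Adam plus a cross-replica gradient accumulator for TensorFlow. A minimal usage sketch, assuming this module matches transformers' public TF optimization helpers (`create_optimizer` and its keyword names are that library's API; the one-layer model is purely illustrative):

import tensorflow as tf
from transformers import create_optimizer

# 10k training steps, 500 of them linear warmup, with decoupled weight decay.
optimizer, lr_schedule = create_optimizer(
    init_lr=5e-5,
    num_train_steps=10_000,
    num_warmup_steps=500,
    weight_decay_rate=0.01,
)
model = tf.keras.Sequential([tf.keras.layers.Dense(2)])
model.compile(optimizer=optimizer, loss="mse")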
716
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class _lowerCamelCase (__lowerCamelCase ): _snake_case = ["input_features", "attention_mask"] def __init__( self : int , lowerCamelCase_ : List[str]=8_0 , lowerCamelCase_ : Tuple=1_6_0_0_0 , lowerCamelCase_ : Union[str, Any]=0.0 , lowerCamelCase_ : List[Any]=1_0 , lowerCamelCase_ : List[str]=2_5 , lowerCamelCase_ : List[Any]="hamming_window" , lowerCamelCase_ : Tuple=3_2768.0 , lowerCamelCase_ : int=0.97 , lowerCamelCase_ : Optional[int]=1.0 , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Optional[int]=False , **lowerCamelCase_ : Optional[Any] , ): """simple docstring""" super().__init__(feature_size=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , padding_value=lowerCamelCase_ , **lowerCamelCase_ ) _lowercase : Dict = feature_size _lowercase : Dict = sampling_rate _lowercase : Tuple = padding_value _lowercase : int = hop_length _lowercase : Any = win_length _lowercase : Union[str, Any] = frame_signal_scale _lowercase : Tuple = preemphasis_coeff _lowercase : Tuple = mel_floor _lowercase : Tuple = normalize_means _lowercase : List[Any] = normalize_vars _lowercase : List[str] = win_function _lowercase : int = return_attention_mask _lowercase : Optional[Any] = win_length * sampling_rate // 1_0_0_0 _lowercase : Tuple = hop_length * sampling_rate // 1_0_0_0 _lowercase : str = optimal_fft_length(self.sample_size ) _lowercase : Dict = (self.n_fft // 2) + 1 def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : np.array ): """simple docstring""" if self.win_function == "hamming_window": _lowercase : List[Any] = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowerCamelCase_ ) else: _lowercase : Union[str, Any] = window_function(window_length=self.sample_size , name=self.win_function ) _lowercase : Tuple = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , ) _lowercase : Tuple = spectrogram( one_waveform * self.frame_signal_scale , window=lowerCamelCase_ , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=lowerCamelCase_ , preemphasis=self.preemphasis_coeff , mel_filters=lowerCamelCase_ , mel_floor=self.mel_floor , log_mel='log' , ) return msfc_features.T def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple ): """simple docstring""" if self.normalize_means: _lowercase : Optional[int] = x[:input_length].mean(axis=0 ) _lowercase : int = np.subtract(lowerCamelCase_ , lowerCamelCase_ ) if self.normalize_vars: _lowercase : int = x[:input_length].std(axis=0 ) _lowercase : Optional[Any] = np.divide(lowerCamelCase_ , lowerCamelCase_ ) if input_length < x.shape[0]: _lowercase : Dict = padding_value # make sure array is in float32 _lowercase : Tuple = x.astype(np.floataa ) return x def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : List[np.ndarray] , lowerCamelCase_ : Optional[np.ndarray] = None ): """simple docstring""" _lowercase : 
Dict = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(lowerCamelCase_ , lowerCamelCase_ , self.padding_value ) for x, n in zip(lowerCamelCase_ , lowerCamelCase_ )] def __call__( self : Dict , lowerCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase_ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[Union[str, TensorType]] = None , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Optional[int] , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) _lowercase : Optional[Any] = isinstance(lowerCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _lowercase : Optional[int] = is_batched_numpy or ( isinstance(lowerCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _lowercase : str = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(lowerCamelCase_ , np.ndarray ): _lowercase : Tuple = np.asarray(lowerCamelCase_ , dtype=np.floataa ) elif isinstance(lowerCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _lowercase : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _lowercase : str = [raw_speech] # extract fbank features _lowercase : Optional[Any] = [self._extract_mfsc_features(lowerCamelCase_ ) for one_waveform in raw_speech] # convert into correct format for padding _lowercase : Optional[int] = BatchFeature({'input_features': features} ) _lowercase : Tuple = self.pad( lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , ) # make sure list is in array format _lowercase : Dict = padded_inputs.get('input_features' ) if isinstance(input_features[0] , lowerCamelCase_ ): _lowercase : List[str] = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for feature in input_features] _lowercase : List[Any] = padded_inputs.get('attention_mask' ) if attention_mask is not None: _lowercase : Union[str, Any] = [np.asarray(lowerCamelCase_ , dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: _lowercase : int = ( np.array(lowerCamelCase_ , dtype=np.intaa ) if self._get_padding_strategies(lowerCamelCase_ , max_length=lowerCamelCase_ ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) _lowercase : List[Any] = self.normalize( padded_inputs['input_features'] , attention_mask=lowerCamelCase_ ) if return_tensors is not None: _lowercase : Union[str, Any] = padded_inputs.convert_to_tensors(lowerCamelCase_ ) return 
padded_inputs
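A usage sketch for a feature extractor with the interface defined above. The definition appears to match the M-CTC-T feature-extraction interface in transformers, so the checkpoint pairing below is an assumption; the synthetic waveform is illustrative:

import numpy as np
from transformers import AutoFeatureExtractor

extractor = AutoFeatureExtractor.from_pretrained("speechbrain/m-ctc-t-large")  # assumed pairing
# One second of a 440 Hz tone at 16 kHz, mono, float32.
waveform = np.sin(2 * np.pi * 440 * np.arange(16_000) / 16_000).astype(np.float32)
inputs = extractor(waveform, sampling_rate=16_000, padding="longest", return_tensors="np")
print(inputs["input_features"].shape)  # (1, num_frames, feature_size)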
283
0
"""simple docstring""" import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __UpperCamelCase : Optional[int] = logging.get_logger(__name__) __UpperCamelCase : str = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''} __UpperCamelCase : str = { '''vocab_file''': { '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''', }, '''emoji_file''': { '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''', }, } __UpperCamelCase : Optional[Any] = { '''abeja/gpt-neox-japanese-2.7b''': 2048, } def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Any ): with open(_UpperCAmelCase , 'r' , encoding='utf-8' ) as f: lowerCAmelCase = json.loads(f.read() ) lowerCAmelCase = collections.OrderedDict() lowerCAmelCase = collections.OrderedDict() lowerCAmelCase = collections.OrderedDict() with open(_UpperCAmelCase , 'r' , encoding='utf-8' ) as f: lowerCAmelCase = f.readlines() lowerCAmelCase = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token] for idx, b in enumerate(_UpperCAmelCase ): lowerCAmelCase = b lowerCAmelCase = idx for wd in b: lowerCAmelCase = idx return vocab, raw_vocab, ids_to_tokens, emoji class a ( a__ ): snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = ['''input_ids''', '''attention_mask'''] def __init__( self , _snake_case , _snake_case , _snake_case="<|endoftext|>" , _snake_case="<|endoftext|>" , _snake_case="<|startoftext|>" , _snake_case="<|endoftext|>" , _snake_case=False , **_snake_case , ): """simple docstring""" super().__init__( unk_token=_snake_case , pad_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , do_clean_text=_snake_case , **_snake_case , ) if not os.path.isfile(_snake_case ): raise ValueError( F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained' ' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' ) if not os.path.isfile(_snake_case ): raise ValueError( F'Can\'t find a emoji file at path \'{emoji_file}\'. 
To load the emoji information from a Google' ' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' ) lowerCAmelCase = do_clean_text lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = load_vocab_and_emoji(_snake_case , _snake_case ) lowerCAmelCase = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji ) @property def UpperCamelCase__ ( self ): """simple docstring""" return len(self.raw_vocab ) def UpperCamelCase__ ( self ): """simple docstring""" return dict(self.raw_vocab , **self.added_tokens_encoder ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return self.subword_tokenizer.tokenize(_snake_case , clean=self.do_clean_text ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return self.subword_tokenizer.convert_id_to_token(_snake_case ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = ''.join(_snake_case ).strip() return out_string def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(_snake_case , add_special_tokens=_snake_case ) + [self.eos_token_id] ) if len(_snake_case ) > self.model_max_length: lowerCAmelCase = input_ids[-self.model_max_length :] return input_ids def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = 0 if os.path.isdir(_snake_case ): lowerCAmelCase = os.path.join( _snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) lowerCAmelCase = os.path.join( _snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] ) else: lowerCAmelCase = ( (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file'] ) lowerCAmelCase = ( (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file'] ) with open(_snake_case , 'w' , encoding='utf-8' ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.' ' Please check that the vocabulary is not corrupted!' 
) lowerCAmelCase = token_index writer.write(','.join(_snake_case ) + '\n' ) index += 1 with open(_snake_case , 'w' , encoding='utf-8' ) as writer: json.dump(self.emoji , _snake_case ) return vocab_file, emoji_file class a ( a__ ): def __init__( self , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = vocab # same as swe lowerCAmelCase = ids_to_tokens # same as bpe lowerCAmelCase = emoji lowerCAmelCase = np.max([len(_snake_case ) for w in self.vocab.keys()] ) lowerCAmelCase = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' ) lowerCAmelCase = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' ) lowerCAmelCase = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' ) lowerCAmelCase = re.compile( r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' ) lowerCAmelCase = re.compile( r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' ) lowerCAmelCase = re.compile( r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' ) lowerCAmelCase = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿' lowerCAmelCase = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟' lowerCAmelCase = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} ) def __len__( self ): """simple docstring""" return len(self.ids_to_tokens ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = self.content_repattera.sub('<URL>' , _snake_case ) lowerCAmelCase = self.content_repattera.sub('<EMAIL>' , _snake_case ) lowerCAmelCase = self.content_repattera.sub('<TEL>' , _snake_case ) lowerCAmelCase = self.content_repattera.sub('<DATE>' , _snake_case ) lowerCAmelCase = self.content_repattera.sub('<DATE>' , _snake_case ) lowerCAmelCase = self.content_repattera.sub('<PRICE>' , _snake_case ) lowerCAmelCase = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: lowerCAmelCase = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' ) return content def UpperCamelCase__ ( self , _snake_case , _snake_case=False ): """simple docstring""" lowerCAmelCase = text.replace(' ' , '<SP>' ) lowerCAmelCase = text.replace(' ' , '<SP>' ) lowerCAmelCase = text.replace('\r\n' , '<BR>' ) lowerCAmelCase = text.replace('\n' , '<BR>' ) lowerCAmelCase = text.replace('\r' , '<BR>' ) lowerCAmelCase = text.replace('\t' , '<TAB>' ) lowerCAmelCase = text.replace('—' , 'ー' ) lowerCAmelCase = text.replace('−' , 'ー' ) for k, v in self.emoji["emoji"].items(): if k in text: lowerCAmelCase = text.replace(_snake_case , _snake_case ) if clean: lowerCAmelCase = self.clean_text(_snake_case ) def check_simbol(_snake_case ): lowerCAmelCase = x.encode() if len(_snake_case ) == 1 and len(_snake_case ) == 2: lowerCAmelCase = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0xc_2a1 and c <= 0xc_2bf) or (c >= 0xc_780 and c <= 0xc_783) or (c >= 0xc_ab9 and c <= 0xc_bbf) or (c >= 0xc_c80 and c <= 0xc_da2) ): return True return False def checkuae(_snake_case ): lowerCAmelCase = x.encode() if len(_snake_case ) == 1 and len(_snake_case ) == 3: lowerCAmelCase = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if c 
>= 0xe28_080 and c <= 0xe2b_07f: return True return False lowerCAmelCase = 0 lowerCAmelCase = [] while pos < len(_snake_case ): lowerCAmelCase = min(len(_snake_case ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3 lowerCAmelCase = [] # (token_id, token, pos) for e in range(_snake_case , _snake_case , -1 ): lowerCAmelCase = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(_snake_case ) > 2: lowerCAmelCase = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(_snake_case ) > 0: # the smallest token_id is adopted lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = sorted(_snake_case , key=lambda _snake_case : x[0] )[0] result.append(_snake_case ) lowerCAmelCase = e else: lowerCAmelCase = pos + 1 lowerCAmelCase = text[pos:end] if check_simbol(_snake_case ): result.append('<KIGOU>' ) elif checkuae(_snake_case ): result.append('<U2000U2BFF>' ) else: for i in wd.encode('utf-8' ): result.append('<|byte%d|>' % i ) lowerCAmelCase = end return result def UpperCamelCase__ ( self , _snake_case , _snake_case="\n" ): """simple docstring""" lowerCAmelCase = [] lowerCAmelCase = [] lowerCAmelCase = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(_snake_case ) > 0: words.append(bytearray(_snake_case ).decode('utf-8' , errors='replace' ) ) lowerCAmelCase = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji['emoji_inv'][word] ) elif word == "<SP>": words.append(' ' ) elif word == "<BR>": words.append(_snake_case ) elif word == "<TAB>": words.append('\t' ) elif word == "<BLOCK>": words.append('▀' ) elif word == "<KIGOU>": words.append('ǀ' ) elif word == "<U2000U2BFF>": words.append('‖' ) else: words.append(_snake_case ) if len(_snake_case ) > 0: words.append(bytearray(_snake_case ).decode('utf-8' , errors='replace' ) ) lowerCAmelCase = ''.join(_snake_case ) return text
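The pair of classes above combines a vocabulary/emoji loader with a greedy longest-match subword tokenizer that falls back to byte tokens. A short usage sketch, assuming the code corresponds to transformers' GPTNeoXJapaneseTokenizer and the named checkpoint is reachable:

from transformers import GPTNeoXJapaneseTokenizer

tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
ids = tokenizer("吾輩は猫である").input_ids
print(tokenizer.decode(ids))  # round-trips the input text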
4
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model') lowerCAmelCase_ : int = {'target_lang': 'fi', 'source_lang': 'en'} lowerCAmelCase_ : str = '>>zh<<' lowerCAmelCase_ : List[str] = 'Helsinki-NLP/' if is_torch_available(): lowerCAmelCase_ : Dict = 'pt' elif is_tf_available(): lowerCAmelCase_ : Union[str, Any] = 'tf' else: lowerCAmelCase_ : int = 'jax' @require_sentencepiece class SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ): __magic_name__ : Dict = MarianTokenizer __magic_name__ : Any = False __magic_name__ : str = True def lowercase_ ( self : Any ): '''simple docstring''' super().setUp() a_ : Optional[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] a_ : Optional[int] = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) ) a_ : List[str] = Path(self.tmpdirname ) save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] ) save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] ) copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] ) a_ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase_ ( self : Tuple , **lowercase__ : int ): '''simple docstring''' return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase__ ) def lowercase_ ( self : int , lowercase__ : int ): '''simple docstring''' return ( "This is a test", "This is a test", ) def lowercase_ ( self : List[str] ): '''simple docstring''' a_ : Optional[int] = """</s>""" a_ : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ ) def lowercase_ ( self : Tuple ): '''simple docstring''' a_ : Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """</s>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(lowercase__ ) , 9 ) def lowercase_ ( self : List[Any] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def lowercase_ ( self : str ): '''simple docstring''' a_ : str = MarianTokenizer.from_pretrained(F"{ORG_NAME}opus-mt-en-de" ) a_ : Any = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) a_ : str = [38, 121, 14, 697, 3_8848, 0] self.assertListEqual(lowercase__ , batch.input_ids[0] ) a_ : Union[str, Any] = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(lowercase__ ) a_ : Union[str, Any] = [x.name for x in Path(lowercase__ ).glob("""*""" )] self.assertIn("""source.spm""" , lowercase__ ) MarianTokenizer.from_pretrained(lowercase__ ) def lowercase_ ( self : str ): '''simple docstring''' a_ : int = 
self.get_tokenizer() a_ : Dict = tok( ["""I am a small frog""" * 1000, """I am a small frog"""] , padding=lowercase__ , truncation=lowercase__ , return_tensors=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(batch.input_ids.shape , (2, 512) ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' a_ : List[str] = self.get_tokenizer() a_ : Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowercase__ , return_tensors=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def lowercase_ ( self : List[Any] ): '''simple docstring''' a_ : Optional[int] = {"""input_ids""": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , ) def lowercase_ ( self : int ): '''simple docstring''' a_ : Tuple = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" ) a_ : Tuple = """Tämä on testi""" a_ : Union[str, Any] = """This is a test""" a_ : Union[str, Any] = [76, 7, 2047, 2] a_ : Optional[int] = [69, 12, 11, 940, 2] a_ : Optional[Any] = tokenizer(lowercase__ ).input_ids self.assertListEqual(lowercase__ , lowercase__ ) a_ : Optional[int] = tokenizer(text_target=lowercase__ ).input_ids self.assertListEqual(lowercase__ , lowercase__ ) a_ : str = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ ) self.assertEqual(lowercase__ , lowercase__ )
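A standalone sketch of the truncation behavior the test above asserts (downloads the Helsinki-NLP checkpoint; the exact cap comes from the tokenizer's model_max_length):

from transformers import MarianTokenizer

tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
batch = tok(
    ["I am a small frog" * 1000, "I am a small frog"],
    padding=True, truncation=True, return_tensors="pt",
)
print(batch.input_ids.shape)  # second dim capped at model_max_length, e.g. (2, 512)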
442
0
'''simple docstring'''

from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_ad import DualTransformeraDModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .ta_film_transformer import TaFilmDecoder
    from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_ad_condition_flax import FlaxUNetaDConditionModel
    from .vae_flax import FlaxAutoencoderKL
705
'''simple docstring'''


def counting_sort(collection):
    """Stable counting sort; supports negative integers."""
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how often each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"

    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(counting_sort(unsorted))
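A few quick checks for the functions above; counting sort is stable and runs in O(n + k) time, where k = max - min + 1 is the value range:

assert counting_sort([5, 3, 2, 8, 1]) == [1, 2, 3, 5, 8]
assert counting_sort([-3, 0, 2, -1]) == [-3, -1, 0, 2]  # negatives handled via the coll_min offset
assert counting_sort_string("counting") == "cginnotu"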
389
0
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() A_ = logging.get_logger(__name__) def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ) -> Union[str, Any]: snake_case__ : Tuple = 'huggingface/label-files' snake_case__ : Union[str, Any] = 'imagenet-1k-id2label.json' snake_case__ : str = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) snake_case__ : Tuple = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} snake_case__ : Optional[Any] = {v: k for k, v in idalabel.items()} snake_case__ : Tuple = 'std_conv' if 'bit' in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" snake_case__ : str = BitConfig( conv_layer=__SCREAMING_SNAKE_CASE , num_labels=1000 , idalabel=__SCREAMING_SNAKE_CASE , labelaid=__SCREAMING_SNAKE_CASE , ) return config def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ) -> Optional[Any]: if "stem.conv" in name: snake_case__ : str = name.replace('stem.conv' , 'bit.embedder.convolution' ) if "blocks" in name: snake_case__ : Any = name.replace('blocks' , 'layers' ) if "head.fc" in name: snake_case__ : Any = name.replace('head.fc' , 'classifier.1' ) if name.startswith('norm' ): snake_case__ : Optional[int] = 'bit.' + name if "bit" not in name and "classifier" not in name: snake_case__ : str = 'bit.encoder.' 
+ name return name def UpperCamelCase__ ( ) -> Any: snake_case__ : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case__ : Dict = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) -> Optional[Any]: snake_case__ : Tuple = get_config(__SCREAMING_SNAKE_CASE ) # load original model from timm snake_case__ : Tuple = create_model(__SCREAMING_SNAKE_CASE , pretrained=__SCREAMING_SNAKE_CASE ) timm_model.eval() # load state_dict of original model snake_case__ : Tuple = timm_model.state_dict() for key in state_dict.copy().keys(): snake_case__ : int = state_dict.pop(__SCREAMING_SNAKE_CASE ) snake_case__ : List[Any] = val.squeeze() if 'head' in key else val # load HuggingFace model snake_case__ : List[Any] = BitForImageClassification(__SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(__SCREAMING_SNAKE_CASE ) # create image processor snake_case__ : Tuple = create_transform(**resolve_data_config({} , model=__SCREAMING_SNAKE_CASE ) ) snake_case__ : Optional[Any] = transform.transforms snake_case__ : Any = { 'bilinear': PILImageResampling.BILINEAR, 'bicubic': PILImageResampling.BICUBIC, 'nearest': PILImageResampling.NEAREST, } snake_case__ : Optional[int] = BitImageProcessor( do_resize=__SCREAMING_SNAKE_CASE , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__SCREAMING_SNAKE_CASE , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=__SCREAMING_SNAKE_CASE , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) snake_case__ : Dict = prepare_img() snake_case__ : Union[str, Any] = transform(__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) snake_case__ : Dict = processor(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values # verify pixel values assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # verify logits with torch.no_grad(): snake_case__ : List[str] = model(__SCREAMING_SNAKE_CASE ) snake_case__ : Dict = outputs.logits print('Logits:' , logits[0, :3] ) print('Predicted class:' , model.config.idalabel[logits.argmax(-1 ).item()] ) snake_case__ : List[str] = timm_model(__SCREAMING_SNAKE_CASE ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: print(f"Pushing model {model_name} and processor to the hub" ) model.push_to_hub(f"ybelkada/{model_name}" ) processor.push_to_hub(f"ybelkada/{model_name}" ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="resnetv2_50x1_bitm", type=str, help="Name of the BiT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." 
) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model to the hub.", ) A_ = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
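A hedged way to drive the converter above without the CLI wrapper (the call is positional to match the definition; running it needs timm installed plus network access to fetch the pretrained weights, and the output directory is illustrative):

convert_bit_checkpoint(
    "resnetv2_50x1_bitm",  # timm model name
    "./bit-50",            # PyTorch dump folder (illustrative path)
    False,                 # push_to_hub
)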
270
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) A_ = { "configuration_roberta_prelayernorm": [ "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaPreLayerNormConfig", "RobertaPreLayerNormOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST", "RobertaPreLayerNormForCausalLM", "RobertaPreLayerNormForMaskedLM", "RobertaPreLayerNormForMultipleChoice", "RobertaPreLayerNormForQuestionAnswering", "RobertaPreLayerNormForSequenceClassification", "RobertaPreLayerNormForTokenClassification", "RobertaPreLayerNormModel", "RobertaPreLayerNormPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRobertaPreLayerNormForCausalLM", "TFRobertaPreLayerNormForMaskedLM", "TFRobertaPreLayerNormForMultipleChoice", "TFRobertaPreLayerNormForQuestionAnswering", "TFRobertaPreLayerNormForSequenceClassification", "TFRobertaPreLayerNormForTokenClassification", "TFRobertaPreLayerNormMainLayer", "TFRobertaPreLayerNormModel", "TFRobertaPreLayerNormPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ "FlaxRobertaPreLayerNormForCausalLM", "FlaxRobertaPreLayerNormForMaskedLM", "FlaxRobertaPreLayerNormForMultipleChoice", "FlaxRobertaPreLayerNormForQuestionAnswering", "FlaxRobertaPreLayerNormForSequenceClassification", "FlaxRobertaPreLayerNormForTokenClassification", "FlaxRobertaPreLayerNormModel", "FlaxRobertaPreLayerNormPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaPreLayerNormConfig, RobertaPreLayerNormOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaPreLayerNormForCausalLM, RobertaPreLayerNormForMaskedLM, RobertaPreLayerNormForMultipleChoice, RobertaPreLayerNormForQuestionAnswering, RobertaPreLayerNormForSequenceClassification, RobertaPreLayerNormForTokenClassification, RobertaPreLayerNormModel, RobertaPreLayerNormPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta_prelayernorm import ( TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaPreLayerNormForCausalLM, TFRobertaPreLayerNormForMaskedLM, TFRobertaPreLayerNormForMultipleChoice, TFRobertaPreLayerNormForQuestionAnswering, TFRobertaPreLayerNormForSequenceClassification, TFRobertaPreLayerNormForTokenClassification, TFRobertaPreLayerNormMainLayer, TFRobertaPreLayerNormModel, TFRobertaPreLayerNormPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, 
FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormPreTrainedModel, ) else: import sys A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
270
1
'''simple docstring'''

from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    '''simple docstring'''
    if a == 0:
        raise ValueError('Coefficient \'a\' must not be zero.')
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    # return plain floats when the roots have no imaginary part
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    '''simple docstring'''
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"""The solutions are: {solution_1} and {solution_2}""")


if __name__ == "__main__":
    main()
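A worked check: 5x^2 + 6x + 1 factors as (5x + 1)(x + 1), so the roots are -0.2 and -1; a negative discriminant yields complex roots instead:

assert quadratic_roots(a=5, b=6, c=1) == (-0.2, -1.0)
print(quadratic_roots(a=1, b=0, c=1))  # two purely imaginary roots, 1j and -1j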
712
'''simple docstring''' import numpy class lowercase : def __init__( self , _snake_case , _snake_case) -> None: UpperCAmelCase_ : Optional[Any] = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. UpperCAmelCase_ : Tuple = numpy.random.rand( self.input_array.shape[1] , 4) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. UpperCAmelCase_ : List[str] = numpy.random.rand( 4 , 3) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. UpperCAmelCase_ : Dict = numpy.random.rand(3 , 1) # Real output values provided. UpperCAmelCase_ : str = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. UpperCAmelCase_ : Union[str, Any] = numpy.zeros(output_array.shape) def _snake_case ( self) -> numpy.ndarray: UpperCAmelCase_ : Any = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights)) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. UpperCAmelCase_ : Tuple = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , )) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. UpperCAmelCase_ : int = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , )) return self.layer_between_second_hidden_layer_and_output def _snake_case ( self) -> None: UpperCAmelCase_ : Optional[int] = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output) , ) UpperCAmelCase_ : Any = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer) , ) UpperCAmelCase_ : Tuple = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def _snake_case ( self , _snake_case , _snake_case , _snake_case) -> None: for iteration in range(1 , iterations + 1): UpperCAmelCase_ : int = self.feedforward() self.back_propagation() if give_loss: UpperCAmelCase_ : List[Any] = 
numpy.mean(numpy.square(output - self.feedforward())) print(F"""Iteration {iteration} Loss: {loss}""") def _snake_case ( self , _snake_case) -> int: UpperCAmelCase_ : Optional[int] = input_arr UpperCAmelCase_ : Tuple = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights)) UpperCAmelCase_ : Optional[int] = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , )) UpperCAmelCase_ : int = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , )) return int(self.layer_between_second_hidden_layer_and_output > 0.6) def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> numpy.ndarray: return 1 / (1 + numpy.exp(-value )) def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> numpy.ndarray: return (value) * (1 - (value)) def SCREAMING_SNAKE_CASE( ) -> int: UpperCAmelCase_ : Optional[int] = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) ,dtype=numpy.floataa ,) # True output values for the given input values. UpperCAmelCase_ : Dict = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) ,dtype=numpy.floataa ) # Calling neural network class. UpperCAmelCase_ : List[str] = TwoHiddenLayerNeuralNetwork( input_array=UpperCamelCase ,output_array=UpperCamelCase ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=UpperCamelCase ,iterations=1_0 ,give_loss=UpperCamelCase ) return neural_network.predict(numpy.array(([1, 1, 1]) ,dtype=numpy.floataa ) ) if __name__ == "__main__": example()
471
0
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {'tokenization_byt5': ['ByT5Tokenizer']}

if TYPE_CHECKING:
    from .tokenization_byta import ByTaTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
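Illustrative effect of the _LazyModule indirection above: importing the package is cheap, and the tokenizer module is only loaded on first attribute access (the module path is the real transformers location; ByT5 is byte-level, so no vocab file is needed):

import transformers.models.byt5 as byt5_pkg  # fast: nothing heavy imported yet

tokenizer = byt5_pkg.ByT5Tokenizer()  # first attribute access triggers the real import
print(tokenizer("hello").input_ids)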
99
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print('Enter list to be sorted')
    input_list = [int(x) for x in input().split()]  # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print('The sorted list is')
    print(sorted_list)
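Quick checks for the function above. Odd-even transposition sort is O(n^2) like bubble sort, but the comparisons within each even or odd pass touch disjoint pairs, which is why the algorithm parallelizes well:

assert odd_even_sort([3, 1, 4, 1, 5, 9, 2, 6]) == [1, 1, 2, 3, 4, 5, 6, 9]
assert odd_even_sort([]) == []
assert odd_even_sort([7]) == [7]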
99
1
'''simple docstring''' import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu SCREAMING_SNAKE_CASE__ = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json" with io.open(filename, "r", encoding="utf-8") as f: SCREAMING_SNAKE_CASE__ = json.load(f) @require_torch class snake_case (unittest.TestCase ): def _a ( self ,UpperCAmelCase_ ) -> Tuple: return FSMTTokenizer.from_pretrained(UpperCAmelCase_ ) def _a ( self ,UpperCAmelCase_ ) -> Optional[Any]: lowercase__ = FSMTForConditionalGeneration.from_pretrained(UpperCAmelCase_ ).to(UpperCAmelCase_ ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ["en-ru", 26.0], ["ru-en", 22.0], ["en-de", 22.0], ["de-en", 29.0], ] ) @slow def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> List[Any]: # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality lowercase__ = F'''facebook/wmt19-{pair}''' lowercase__ = self.get_tokenizer(UpperCAmelCase_ ) lowercase__ = self.get_model(UpperCAmelCase_ ) lowercase__ = bleu_data[pair]["src"] lowercase__ = bleu_data[pair]["tgt"] lowercase__ = tokenizer(UpperCAmelCase_ ,return_tensors="pt" ,truncation=UpperCAmelCase_ ,padding="longest" ).to(UpperCAmelCase_ ) lowercase__ = model.generate( input_ids=batch.input_ids ,num_beams=8 ,) lowercase__ = tokenizer.batch_decode( UpperCAmelCase_ ,skip_special_tokens=UpperCAmelCase_ ,clean_up_tokenization_spaces=UpperCAmelCase_ ) lowercase__ = calculate_bleu(UpperCAmelCase_ ,UpperCAmelCase_ ) print(UpperCAmelCase_ ) self.assertGreaterEqual(scores["bleu"] ,UpperCAmelCase_ )
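A standalone translation sketch mirroring what the benchmark above scores (public transformers API; downloads the WMT19 checkpoint):

from transformers import FSMTForConditionalGeneration, FSMTTokenizer

mname = "facebook/wmt19-en-ru"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

batch = tokenizer(["Machine learning is great, isn't it?"], return_tensors="pt")
generated = model.generate(input_ids=batch.input_ids, num_beams=8)
print(tokenizer.decode(generated[0], skip_special_tokens=True))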
539
'''simple docstring''' import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class snake_case (UpperCamelCase , unittest.TestCase ): lowerCAmelCase__ :int = TextToVideoSDPipeline lowerCAmelCase__ :Union[str, Any] = TEXT_TO_IMAGE_PARAMS lowerCAmelCase__ :List[str] = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. lowerCAmelCase__ :Optional[Any] = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def _a ( self ) -> Optional[int]: torch.manual_seed(0 ) lowercase__ = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") ,up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") ,cross_attention_dim=32 ,attention_head_dim=4 ,) lowercase__ = DDIMScheduler( beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule="scaled_linear" ,clip_sample=UpperCAmelCase_ ,set_alpha_to_one=UpperCAmelCase_ ,) torch.manual_seed(0 ) lowercase__ = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,sample_size=128 ,) torch.manual_seed(0 ) lowercase__ = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,hidden_act="gelu" ,projection_dim=512 ,) lowercase__ = CLIPTextModel(UpperCAmelCase_ ) lowercase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) lowercase__ = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_=0 ) -> int: if str(UpperCAmelCase_ ).startswith("mps" ): lowercase__ = torch.manual_seed(UpperCAmelCase_ ) else: lowercase__ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ ) lowercase__ = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def _a ( self ) -> Tuple: lowercase__ = "cpu" # ensure determinism for the device-dependent torch.Generator lowercase__ = self.get_dummy_components() lowercase__ = TextToVideoSDPipeline(**UpperCAmelCase_ ) lowercase__ = sd_pipe.to(UpperCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowercase__ = self.get_dummy_inputs(UpperCAmelCase_ ) lowercase__ = "np" lowercase__ = sd_pipe(**UpperCAmelCase_ ).frames lowercase__ = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) lowercase__ = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a ( self ) -> List[Any]: 
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCAmelCase_ ,expected_max_diff=3E-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,) def _a ( self ) -> Dict: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCAmelCase_ ,expected_max_diff=1E-2 ) @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def _a ( self ) -> Union[str, Any]: pass @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def _a ( self ) -> Union[str, Any]: pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." ) def _a ( self ) -> Union[str, Any]: pass def _a ( self ) -> str: return super().test_progress_bar() @slow @skip_mps class snake_case (unittest.TestCase ): def _a ( self ) -> Optional[int]: lowercase__ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" ) lowercase__ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) lowercase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) lowercase__ = pipe.to("cuda" ) lowercase__ = "Spiderman is surfing" lowercase__ = torch.Generator(device="cpu" ).manual_seed(0 ) lowercase__ = pipe(UpperCAmelCase_ ,generator=UpperCAmelCase_ ,num_inference_steps=25 ,output_type="pt" ).frames lowercase__ = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2 def _a ( self ) -> Optional[int]: lowercase__ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" ) lowercase__ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) lowercase__ = pipe.to("cuda" ) lowercase__ = "Spiderman is surfing" lowercase__ = torch.Generator(device="cpu" ).manual_seed(0 ) lowercase__ = pipe(UpperCAmelCase_ ,generator=UpperCAmelCase_ ,num_inference_steps=2 ,output_type="pt" ).frames lowercase__ = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2
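A hedged end-to-end sketch of the pipeline under test, mirroring the slow test above (needs a CUDA GPU and downloads the damo-vilab checkpoint; the frame tensor's exact layout depends on the diffusers version):

import torch
from diffusers import DPMSolverMultistepScheduler, TextToVideoSDPipeline

pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

generator = torch.Generator(device="cpu").manual_seed(0)
frames = pipe("Spiderman is surfing", generator=generator,
              num_inference_steps=25, output_type="pt").frames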
539
1
'''simple docstring''' import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __UpperCAmelCase = logging.getLogger(__name__) def _snake_case ( A , A , A = None , A = None , A = None , A = None , A = None , A = False , ) -> Union[str, Any]: lowerCAmelCase__ = bnb_quantization_config.load_in_abit lowerCAmelCase__ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,''' ''' make sure you have the latest version of `bitsandbytes` installed.''' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,''' '''make sure you have the latest version of `bitsandbytes` installed.''' ) lowerCAmelCase__ = [] # custom device map if isinstance(A , A ) and len(device_map.keys() ) > 1: lowerCAmelCase__ = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: lowerCAmelCase__ = get_keys_to_not_convert(A ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(A ) lowerCAmelCase__ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: lowerCAmelCase__ = [] lowerCAmelCase__ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(A ) # compatibility with peft lowerCAmelCase__ = load_in_abit lowerCAmelCase__ = load_in_abit lowerCAmelCase__ = get_parameter_device(A ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( '''It is not recommended to quantize a loaded model. ''' '''The model should be instantiated under the `init_empty_weights` context manager.''' ) lowerCAmelCase__ = replace_with_bnb_layers(A , A , modules_to_not_convert=A ) # convert param to the right dtype lowerCAmelCase__ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: lowerCAmelCase__ = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' ) lowerCAmelCase__ = getattr(A , A , A ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(A ): param.to(A ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info( F"""The model device type is {model_device.type}. 
However, cuda is needed for quantization.""" '''We move the model to cuda.''' ) return model elif weights_location is None: raise RuntimeError( F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): lowerCAmelCase__ = replace_with_bnb_layers( A , A , modules_to_not_convert=A ) lowerCAmelCase__ = get_quantized_model_device_map( A , A , A , max_memory=A , no_split_module_classes=A , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): lowerCAmelCase__ = True lowerCAmelCase__ = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] ) load_checkpoint_in_model( A , A , A , dtype=bnb_quantization_config.torch_dtype , offload_folder=A , offload_state_dict=A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(A , device_map=A , offload_dir=A ) def _snake_case ( A , A , A=None , A=None , A=None ) -> List[Any]: if device_map is None: if torch.cuda.is_available(): lowerCAmelCase__ = {'''''': torch.cuda.current_device()} else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' ) if isinstance(A , A ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ''' '''\'sequential\'.''' ) lowerCAmelCase__ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) lowerCAmelCase__ = {} lowerCAmelCase__ = special_dtypes lowerCAmelCase__ = no_split_module_classes lowerCAmelCase__ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": lowerCAmelCase__ = get_balanced_memory( A , low_zero=(device_map == '''balanced_low_0''') , max_memory=A , **A , ) lowerCAmelCase__ = max_memory lowerCAmelCase__ = infer_auto_device_map(A , **A ) if isinstance(A , A ): # check if don't have any quantized module on the cpu lowerCAmelCase__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules lowerCAmelCase__ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( ''' Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. ''' ) else: logger.info( '''Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit''' ) del device_map_without_some_modules return device_map def _snake_case ( A , A , A=None , A=None ) -> Any: if modules_to_not_convert is None: lowerCAmelCase__ = [] lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers( A , A , A , A ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def _snake_case ( A , A , A=None , A=None , ) -> Optional[Any]: lowerCAmelCase__ = False for name, module in model.named_children(): if current_key_name is None: lowerCAmelCase__ = [] current_key_name.append(A ) if isinstance(A , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` lowerCAmelCase__ = '''.'''.join(A ) lowerCAmelCase__ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: lowerCAmelCase__ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: lowerCAmelCase__ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=A , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: lowerCAmelCase__ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' ) lowerCAmelCase__ = module.weight.data if module.bias is not None: lowerCAmelCase__ = module.bias.data bnb_module.requires_grad_(A ) setattr(A , A , A ) lowerCAmelCase__ = True if len(list(module.children() ) ) > 0: lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers( A , A , A , A ) lowerCAmelCase__ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _snake_case ( A ) -> Tuple: # Create a copy of the model with init_empty_weights(): lowerCAmelCase__ = deepcopy(A ) # this has 0 cost since it is done inside `init_empty_weights` context manager` lowerCAmelCase__ = find_tied_parameters(A ) # For compatibility with Accelerate < 0.18 if isinstance(A , A ): lowerCAmelCase__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: lowerCAmelCase__ = sum(A , [] ) lowerCAmelCase__ = len(A ) > 0 # Check if it is a base model lowerCAmelCase__ = False if hasattr(A , '''base_model_prefix''' ): lowerCAmelCase__ = not hasattr(A , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) 
if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head lowerCAmelCase__ = list(model.named_children() ) lowerCAmelCase__ = [list_modules[-1][0]] # add last module together with tied weights lowerCAmelCase__ = set(A ) - set(A ) lowerCAmelCase__ = list(set(A ) ) + list(A ) # remove ".weight" from the keys lowerCAmelCase__ = ['''.weight''', '''.bias'''] lowerCAmelCase__ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: lowerCAmelCase__ = name.replace(A , '''''' ) filtered_module_names.append(A ) return filtered_module_names def _snake_case ( A ) -> Optional[int]: for m in model.modules(): if isinstance(A , bnb.nn.Linearabit ): return True return False def _snake_case ( A ) -> Union[str, Any]: return next(parameter.parameters() ).device def _snake_case ( A , A , A , A , A , A , A ) -> Any: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(A , A , 0 , dtype=A , value=A ) lowerCAmelCase__ = param_name lowerCAmelCase__ = model if "." in tensor_name: lowerCAmelCase__ = tensor_name.split('''.''' ) for split in splits[:-1]: lowerCAmelCase__ = getattr(A , A ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) lowerCAmelCase__ = new_module lowerCAmelCase__ = splits[-1] # offload weights lowerCAmelCase__ = False offload_weight(module._parameters[tensor_name] , A , A , index=A ) if hasattr(module._parameters[tensor_name] , '''SCB''' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , A , index=A , ) else: offload_weight(A , A , A , index=A ) offload_weight(A , param_name.replace('''weight''' , '''SCB''' ) , A , index=A ) set_module_tensor_to_device(A , A , '''meta''' , dtype=A , value=torch.empty(*param.size() ) )
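For context, the helpers above are the machinery behind accelerate's public `load_and_quantize_model` entry point. A hedged usage sketch; the checkpoint name and the weights folder are placeholders, and the model is instantiated on the meta device as the warning in the code recommends:

from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("facebook/opt-350m")  # placeholder checkpoint
with init_empty_weights():  # meta-device instantiation: no weights materialised yet
    empty_model = AutoModelForCausalLM.from_config(config)

bnb_config = BnbQuantizationConfig(load_in_8bit=True)  # or load_in_4bit=True
quantized_model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/opt-350m-weights",  # placeholder: folder holding the checkpoint files
    device_map="auto",
)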
90
'''simple docstring''' import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : List[str] = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"} _SCREAMING_SNAKE_CASE : Tuple = { "vocab_file": { "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt", }, "emoji_file": { "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json", }, } _SCREAMING_SNAKE_CASE : int = { "abeja/gpt-neox-japanese-2.7b": 2048, } def UpperCamelCase_( snake_case : List[Any] , snake_case : List[str] ): '''simple docstring''' with open(snake_case , "r" , encoding="utf-8" ) as f: snake_case_ = json.loads(f.read() ) snake_case_ = collections.OrderedDict() snake_case_ = collections.OrderedDict() snake_case_ = collections.OrderedDict() with open(snake_case , "r" , encoding="utf-8" ) as f: snake_case_ = f.readlines() snake_case_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token] for idx, b in enumerate(snake_case ): snake_case_ = b snake_case_ = idx for wd in b: snake_case_ = idx return vocab, raw_vocab, ids_to_tokens, emoji class _snake_case ( lowercase_ ): lowerCAmelCase_ : Union[str, Any] = VOCAB_FILES_NAMES lowerCAmelCase_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ : int = ["input_ids", "attention_mask"] def __init__( self , a__ , a__ , a__="<|endoftext|>" , a__="<|endoftext|>" , a__="<|startoftext|>" , a__="<|endoftext|>" , a__=False , **a__ , ) -> Optional[int]: '''simple docstring''' super().__init__( unk_token=a__ , pad_token=a__ , bos_token=a__ , eos_token=a__ , do_clean_text=a__ , **a__ , ) if not os.path.isfile(a__ ): raise ValueError( F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained' " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) if not os.path.isfile(a__ ): raise ValueError( F'Can\'t find a emoji file at path \'{emoji_file}\'. 
To load the emoji information from a Google' " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) snake_case_ = do_clean_text snake_case_ , snake_case_ , snake_case_ , snake_case_ = load_vocab_and_emoji(a__ , a__ ) snake_case_ = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji ) @property def lowerCAmelCase__ ( self ) -> Any: '''simple docstring''' return len(self.raw_vocab ) def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' return dict(self.raw_vocab , **self.added_tokens_encoder ) def lowerCAmelCase__ ( self , a__ ) -> Any: '''simple docstring''' return self.subword_tokenizer.tokenize(a__ , clean=self.do_clean_text ) def lowerCAmelCase__ ( self , a__ ) -> List[Any]: '''simple docstring''' return self.vocab.get(a__ , self.vocab.get(self.unk_token ) ) def lowerCAmelCase__ ( self , a__ ) -> List[Any]: '''simple docstring''' return self.subword_tokenizer.convert_id_to_token(a__ ) def lowerCAmelCase__ ( self , a__ ) -> Any: '''simple docstring''' snake_case_ = "".join(a__ ).strip() return out_string def lowerCAmelCase__ ( self , a__ ) -> List[int]: '''simple docstring''' snake_case_ = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(a__ , add_special_tokens=a__ ) + [self.eos_token_id] ) if len(a__ ) > self.model_max_length: snake_case_ = input_ids[-self.model_max_length :] return input_ids def lowerCAmelCase__ ( self , a__ , a__ = None ) -> Tuple[str]: '''simple docstring''' snake_case_ = 0 if os.path.isdir(a__ ): snake_case_ = os.path.join( a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) snake_case_ = os.path.join( a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] ) else: snake_case_ = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"] ) snake_case_ = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"] ) with open(a__ , "w" , encoding="utf-8" ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.' " Please check that the vocabulary is not corrupted!" 
) snake_case_ = token_index writer.write(",".join(a__ ) + "\n" ) index += 1 with open(a__ , "w" , encoding="utf-8" ) as writer: json.dump(self.emoji , a__ ) return vocab_file, emoji_file class _snake_case ( lowercase_ ): def __init__( self , a__ , a__ , a__ ) -> Any: '''simple docstring''' snake_case_ = vocab # same as swe snake_case_ = ids_to_tokens # same as bpe snake_case_ = emoji snake_case_ = np.max([len(a__ ) for w in self.vocab.keys()] ) snake_case_ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" ) snake_case_ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" ) snake_case_ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" ) snake_case_ = re.compile( r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) snake_case_ = re.compile( r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) snake_case_ = re.compile( r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" ) snake_case_ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿" snake_case_ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟" snake_case_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} ) def __len__( self ) -> List[Any]: '''simple docstring''' return len(self.ids_to_tokens ) def lowerCAmelCase__ ( self , a__ ) -> str: '''simple docstring''' snake_case_ = self.content_repattera.sub("<URL>" , a__ ) snake_case_ = self.content_repattera.sub("<EMAIL>" , a__ ) snake_case_ = self.content_repattera.sub("<TEL>" , a__ ) snake_case_ = self.content_repattera.sub("<DATE>" , a__ ) snake_case_ = self.content_repattera.sub("<DATE>" , a__ ) snake_case_ = self.content_repattera.sub("<PRICE>" , a__ ) snake_case_ = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: snake_case_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" ) return content def lowerCAmelCase__ ( self , a__ , a__=False ) -> Optional[Any]: '''simple docstring''' snake_case_ = text.replace(" " , "<SP>" ) snake_case_ = text.replace(" " , "<SP>" ) snake_case_ = text.replace("\r\n" , "<BR>" ) snake_case_ = text.replace("\n" , "<BR>" ) snake_case_ = text.replace("\r" , "<BR>" ) snake_case_ = text.replace("\t" , "<TAB>" ) snake_case_ = text.replace("—" , "ー" ) snake_case_ = text.replace("−" , "ー" ) for k, v in self.emoji["emoji"].items(): if k in text: snake_case_ = text.replace(a__ , a__ ) if clean: snake_case_ = self.clean_text(a__ ) def check_simbol(a__ ): snake_case_ = x.encode() if len(a__ ) == 1 and len(a__ ) == 2: snake_case_ = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0XC_2_A_1 and c <= 0XC_2_B_F) or (c >= 0XC_7_8_0 and c <= 0XC_7_8_3) or (c >= 0XC_A_B_9 and c <= 0XC_B_B_F) or (c >= 0XC_C_8_0 and c <= 0XC_D_A_2) ): return True return False def checkuae(a__ ): snake_case_ = x.encode() if len(a__ ) == 1 and len(a__ ) == 3: snake_case_ = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0XE_2_8_0_8_0 and c <= 0XE_2_B_0_7_F: return True return False snake_case_ = 0 snake_case_ = [] while pos < len(a__ ): snake_case_ = min(len(a__ ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3 snake_case_ = [] # 
(token_id, token, pos) for e in range(a__ , a__ , -1 ): snake_case_ = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(a__ ) > 2: snake_case_ = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(a__ ) > 0: # the smallest token_id is adopted snake_case_ , snake_case_ , snake_case_ = sorted(a__ , key=lambda a__ : x[0] )[0] result.append(a__ ) snake_case_ = e else: snake_case_ = pos + 1 snake_case_ = text[pos:end] if check_simbol(a__ ): result.append("<KIGOU>" ) elif checkuae(a__ ): result.append("<U2000U2BFF>" ) else: for i in wd.encode("utf-8" ): result.append("<|byte%d|>" % i ) snake_case_ = end return result def lowerCAmelCase__ ( self , a__ , a__="\n" ) -> Union[str, Any]: '''simple docstring''' snake_case_ = [] snake_case_ = [] snake_case_ = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(a__ ) > 0: words.append(bytearray(a__ ).decode("utf-8" , errors="replace" ) ) snake_case_ = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji["emoji_inv"][word] ) elif word == "<SP>": words.append(" " ) elif word == "<BR>": words.append(a__ ) elif word == "<TAB>": words.append("\t" ) elif word == "<BLOCK>": words.append("▀" ) elif word == "<KIGOU>": words.append("ǀ" ) elif word == "<U2000U2BFF>": words.append("‖" ) else: words.append(a__ ) if len(a__ ) > 0: words.append(bytearray(a__ ).decode("utf-8" , errors="replace" ) ) snake_case_ = "".join(a__ ) return text
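The tokenizer above combines a subword vocabulary with emoji lookup and a raw-byte fallback for anything out of vocabulary. A hedged round-trip sketch through the public transformers API, using the checkpoint named in the pretrained maps:

from transformers import GPTNeoXJapaneseTokenizer

tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
ids = tokenizer("吾輩は猫である")["input_ids"]
print(ids)
print(tokenizer.decode(ids))  # round-trips via subwords, emoji entries, and <|byte..|> fallbacks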
400
0
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
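The key relationship the generator relies on: the public component e_2 is the modular inverse of e_1**d (mod p). A tiny sanity check with toy (insecure) numbers:

# (e_1**d mod p) * e_2 ≡ 1 (mod p) by construction.
p, e_1, d = 23, 5, 7
e_2 = pow(pow(e_1, d, p), -1, p)  # Python 3.8+: pow(x, -1, p) computes the modular inverse
assert (pow(e_1, d, p) * e_2) % p == 1
print(e_2)  # 19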
577
import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class _UpperCamelCase( __lowerCamelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[str] = XLNetTokenizer __SCREAMING_SNAKE_CASE : str = XLNetTokenizerFast __SCREAMING_SNAKE_CASE : Tuple = True __SCREAMING_SNAKE_CASE : Tuple = True def __lowerCAmelCase ( self : int ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __a : str = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' __a : Tuple = '<s>' __a : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' __a : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<unk>' ) self.assertEqual(vocab_keys[1] , '<s>' ) self.assertEqual(vocab_keys[-1] , '<eod>' ) self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 1_0_0_6 ) def __lowerCAmelCase ( self : Dict ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def __lowerCAmelCase ( self : str ): '''simple docstring''' __a : Any = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ ) __a : Dict = tokenizer.tokenize('This is a test' ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] ) __a : Optional[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) __a : Optional[Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] ) __a : str = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) def __lowerCAmelCase ( self : Any ): '''simple docstring''' __a : Union[str, Any] = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ ) __a : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [ SPIECE_UNDERLINE + '', 'i', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 'se', '.', ] , ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['▁he', 'll', 'o'] ) def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' __a : Any = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ ) __a : Dict = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 'se', '.', ] , ) @slow def __lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' __a : str = XLNetTokenizer.from_pretrained('xlnet-base-cased' ) __a : List[str] = tokenizer.encode('sequence builders' , add_special_tokens=SCREAMING_SNAKE_CASE__ ) __a : Optional[int] = tokenizer.encode('multi-sequence build' , add_special_tokens=SCREAMING_SNAKE_CASE__ ) __a : int = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ ) __a : Optional[int] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def __lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' __a : str = {'input_ids': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='xlnet-base-cased' , revision='c841166438c31ec7ca9a106dee7bb312b73ae511' , )
577
1
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size have to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(
                2 * np.pi * _x / lambd + psi
            )

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
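A quick check of the kernel builder above (using the restored signature; the parameter values are illustrative): an even ksize is rounded up to the next odd size, and with psi = 0 the centre of the kernel evaluates to exp(0) * cos(0) = 1.0.

import numpy as np

kernel = gabor_filter_kernel(ksize=10, sigma=8, theta=45, lambd=10, gamma=0.5, psi=0)
print(kernel.shape)               # (11, 11) -- even sizes are bumped to odd
print(np.round(kernel[5, 5], 3))  # 1.0 at the centre, since _x = _y = 0 there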
53
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available


def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
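The parser/command pair above is what the `accelerate env` CLI subcommand wires together; with the names restored it can also be driven programmatically. A small sketch, passing no flags so it falls back to the default accelerate config if one exists:

parser = env_command_parser()   # standalone ArgumentParser("Accelerate env command")
args = parser.parse_args([])    # no --config_file: the default config file is used if present
info = env_command(args)        # prints the copy-and-paste report and returns the info dict
print(sorted(info))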
53
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
284
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
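A short usage sketch for the configuration above: the defaults reproduce the MiT-b0 encoder layout, and larger variants are expressed by overriding depths and hidden sizes (the b2-style values below are illustrative):

from transformers import SegformerConfig

config = SegformerConfig()  # defaults: 4 encoder blocks, hidden sizes [32, 64, 160, 256]
print(config.num_encoder_blocks, config.hidden_sizes)

config_b2 = SegformerConfig(depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 320, 512])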
284
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
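The module above relies on transformers' internal `_LazyModule` so that importing the package stays cheap until a class is actually touched. A simplified stand-in illustrating the idea (not the real implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # imported only on first access, then cached on the module object
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)
        return value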
80
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
489
0
import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A_ ( unittest.TestCase ): """simple docstring""" def __init__( self : List[str] ,__A : str ,__A : Dict=3 ,__A : str=32 ,__A : int=3 ,__A : Union[str, Any]=10 ,__A : Optional[Any]=[10, 20, 30, 40] ,__A : List[str]=[1, 1, 2, 1] ,__A : Optional[int]=True ,__A : Optional[Any]=True ,__A : int="relu" ,__A : Union[str, Any]=3 ,__A : List[Any]=None ,) -> int: _lowercase = parent _lowercase = batch_size _lowercase = image_size _lowercase = num_channels _lowercase = embeddings_size _lowercase = hidden_sizes _lowercase = depths _lowercase = is_training _lowercase = use_labels _lowercase = hidden_act _lowercase = num_labels _lowercase = scope _lowercase = len(__A ) def __UpperCAmelCase ( self : Optional[Any] ) -> Any: _lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowercase = self.get_config() return config, pixel_values def __UpperCAmelCase ( self : Any ) -> int: return RegNetConfig( num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,) def __UpperCAmelCase ( self : Tuple ,__A : Any ,__A : Union[str, Any] ) -> List[Any]: _lowercase = FlaxRegNetModel(config=__A ) _lowercase = model(__A ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def __UpperCAmelCase ( self : List[Any] ,__A : Optional[int] ,__A : Dict ) -> Any: _lowercase = self.num_labels _lowercase = FlaxRegNetForImageClassification(config=__A ) _lowercase = model(__A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self : Tuple ) -> Any: _lowercase = self.prepare_config_and_inputs() _lowercase , _lowercase = config_and_inputs _lowercase = {'pixel_values': pixel_values} return config, inputs_dict @require_flax class A_ ( UpperCAmelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () SCREAMING_SNAKE_CASE_ : Union[str, Any] = False SCREAMING_SNAKE_CASE_ : Any = False SCREAMING_SNAKE_CASE_ : Union[str, Any] = False def __UpperCAmelCase ( self : Dict ) -> None: _lowercase = FlaxRegNetModelTester(self ) _lowercase = ConfigTester(self ,config_class=__A ,has_text_modality=__A ) def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __UpperCAmelCase 
( self : Optional[int] ) -> Any: return def __UpperCAmelCase ( self : Dict ) -> int: _lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __UpperCAmelCase ( self : Optional[Any] ) -> Any: _lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) @unittest.skip(reason='RegNet does not use inputs_embeds' ) def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: pass @unittest.skip(reason='RegNet does not support input and output embeddings' ) def __UpperCAmelCase ( self : Tuple ) -> Any: pass def __UpperCAmelCase ( self : Dict ) -> List[str]: _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase = model_class(__A ) _lowercase = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowercase = [*signature.parameters.keys()] _lowercase = ['pixel_values'] self.assertListEqual(arg_names[:1] ,__A ) def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]: def check_hidden_states_output(__A : Optional[Any] ,__A : Optional[int] ,__A : str ): _lowercase = model_class(__A ) _lowercase = model(**self._prepare_for_class(__A ,__A ) ) _lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _lowercase = self.model_tester.num_stages self.assertEqual(len(__A ) ,expected_num_stages + 1 ) _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase = True check_hidden_states_output(__A ,__A ,__A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowercase = True check_hidden_states_output(__A ,__A ,__A ) def __UpperCAmelCase ( self : Optional[Any] ) -> int: _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _lowercase = self._prepare_for_class(__A ,__A ) _lowercase = model_class(__A ) @jax.jit def model_jitted(__A : Any ,**__A : Union[str, Any] ): return model(pixel_values=__A ,**__A ) with self.subTest('JIT Enabled' ): _lowercase = model_jitted(**__A ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): _lowercase = model_jitted(**__A ).to_tuple() self.assertEqual(len(__A ) ,len(__A ) ) for jitted_output, output in zip(__A ,__A ): self.assertEqual(jitted_output.shape ,output.shape ) def SCREAMING_SNAKE_CASE__ ( ) -> str: _lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_flax class A_ ( unittest.TestCase ): """simple docstring""" @cached_property def __UpperCAmelCase ( self : str ) -> Any: return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None @slow def __UpperCAmelCase ( self : Optional[Any] ) -> Any: _lowercase = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' ) _lowercase = self.default_image_processor _lowercase = prepare_img() _lowercase = image_processor(images=__A ,return_tensors='np' ) _lowercase = model(**__A ) # verify the logits _lowercase = (1, 1000) self.assertEqual(outputs.logits.shape ,__A ) _lowercase = jnp.array([-0.4180, -1.5051, -3.4836] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] ,__A ,atol=1e-4 ) )
707
snake_case = """0.18.2""" from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, 
LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, 
FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
535
0
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
329
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
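The triple-nested loop reads as the Project Euler 116 recurrence: for each block colour (lengths 2, 3 and 4) it counts the non-empty tilings of the row by fixing where the first block starts, then sums the three per-colour counts. The worked example from the problem statement makes a handy check:

# A row of length 5 admits 7 red (length-2), 3 green (length-3) and 2 blue (length-4)
# tilings, so the total is 12.
assert solution(5) == 7 + 3 + 2
print(solution(5))  # 12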
329
1
'''simple docstring''' import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class a ( _a , unittest.TestCase ): """simple docstring""" __UpperCAmelCase = RoCBertTokenizer __UpperCAmelCase = None __UpperCAmelCase = False __UpperCAmelCase = True __UpperCAmelCase = filter_non_english def __magic_name__ ( self : Union[str, Any] ): '''simple docstring''' super().setUp() snake_case__ : Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d'''] snake_case__ : Any = {} snake_case__ : List[Any] = {} for i, value in enumerate(_A ): snake_case__ : List[Any] = i snake_case__ : List[str] = i snake_case__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] ) snake_case__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer: json.dump(_A , _A , ensure_ascii=_A ) with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer: json.dump(_A , _A , ensure_ascii=_A ) def __magic_name__ ( self : Optional[int] ): '''simple docstring''' snake_case__ : List[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) snake_case__ : str = tokenizer.tokenize('''你好[SEP]你是谁''' ) self.assertListEqual(_A , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_A ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_A ) , [5, 6, 2, 5, 7, 8] ) def __magic_name__ ( self : Optional[Any] ): '''simple docstring''' snake_case__ : List[Any] = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def __magic_name__ ( self : Any ): '''simple docstring''' snake_case__ : int = RoCBertBasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def __magic_name__ ( self : List[Any] ): '''simple docstring''' snake_case__ : str = RoCBertBasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def __magic_name__ ( self : Dict ): '''simple docstring''' snake_case__ : Dict = RoCBertBasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? 
''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def __magic_name__ ( self : str ): '''simple docstring''' snake_case__ : str = RoCBertBasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def __magic_name__ ( self : Optional[int] ): '''simple docstring''' snake_case__ : Tuple = RoCBertBasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def __magic_name__ ( self : int ): '''simple docstring''' snake_case__ : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def __magic_name__ ( self : int ): '''simple docstring''' snake_case__ : Dict = RoCBertBasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def __magic_name__ ( self : int ): '''simple docstring''' snake_case__ : Any = RoCBertBasicTokenizer(do_lower_case=_A , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def __magic_name__ ( self : List[str] ): '''simple docstring''' snake_case__ : Tuple = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] snake_case__ : str = {} for i, token in enumerate(_A ): snake_case__ : List[str] = i snake_case__ : Optional[int] = RoCBertWordpieceTokenizer(vocab=_A , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def __magic_name__ ( self : Dict ): '''simple docstring''' self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def __magic_name__ ( self : Tuple ): '''simple docstring''' self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def __magic_name__ ( self : Union[str, Any] ): '''simple docstring''' self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def __magic_name__ ( self : Dict ): '''simple docstring''' snake_case__ : Optional[int] = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 
self.assertListEqual([tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) if self.test_rust_tokenizer: snake_case__ : Dict = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) def __magic_name__ ( self : Union[str, Any] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): snake_case__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A ) snake_case__ : List[Any] = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" snake_case__ : Union[str, Any] = tokenizer_r.encode_plus( _A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , add_special_tokens=_A , ) snake_case__ : Dict = tokenizer_r.do_lower_case if hasattr(_A , '''do_lower_case''' ) else False snake_case__ : Union[str, Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), '''Allen'''), ((2_1, 2_3), '''##NL'''), ((2_3, 2_4), '''##P'''), ((2_5, 3_3), '''sentence'''), ((3_3, 3_4), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), '''allen'''), ((2_1, 2_3), '''##nl'''), ((2_3, 2_4), '''##p'''), ((2_5, 3_3), '''sentence'''), ((3_3, 3_4), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def __magic_name__ ( self : List[str] ): '''simple docstring''' snake_case__ : Dict = ['''的''', '''人''', '''有'''] snake_case__ : Tuple = ''''''.join(_A ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): snake_case__ : Dict = True snake_case__ : List[Any] = self.tokenizer_class.from_pretrained(_A , **_A ) snake_case__ : str = self.rust_tokenizer_class.from_pretrained(_A , **_A ) snake_case__ : int = tokenizer_p.encode(_A , add_special_tokens=_A ) snake_case__ : str = tokenizer_r.encode(_A , add_special_tokens=_A ) snake_case__ : str = tokenizer_r.convert_ids_to_tokens(_A ) snake_case__ : str = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A ) snake_case__ : Tuple = False snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A ) snake_case__ : Tuple = self.tokenizer_class.from_pretrained(_A , **_A ) snake_case__ : int = tokenizer_r.encode(_A , add_special_tokens=_A ) snake_case__ : str = tokenizer_p.encode(_A , add_special_tokens=_A ) snake_case__ : str = tokenizer_r.convert_ids_to_tokens(_A ) snake_case__ : List[str] = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that only the first Chinese character is not preceded by "##". 
snake_case__ : Union[str, Any] = [ F"""##{token}""" if idx != 0 else token for idx, token in enumerate(_A ) ] self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A ) @slow def __magic_name__ ( self : Optional[Any] ): '''simple docstring''' snake_case__ : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) snake_case__ : Optional[Any] = tokenizer.encode('''你好''' , add_special_tokens=_A ) snake_case__ : Tuple = tokenizer.encode('''你是谁''' , add_special_tokens=_A ) snake_case__ : int = tokenizer.build_inputs_with_special_tokens(_A ) snake_case__ : Any = tokenizer.build_inputs_with_special_tokens(_A , _A ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def __magic_name__ ( self : Any ): '''simple docstring''' snake_case__ : Any = self.get_tokenizers(do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case__ : Any = '''你好,你是谁''' snake_case__ : Optional[Any] = tokenizer.tokenize(_A ) snake_case__ : Optional[int] = tokenizer.convert_tokens_to_ids(_A ) snake_case__ : Dict = tokenizer.convert_tokens_to_shape_ids(_A ) snake_case__ : str = tokenizer.convert_tokens_to_pronunciation_ids(_A ) snake_case__ : str = tokenizer.prepare_for_model( _A , _A , _A , add_special_tokens=_A ) snake_case__ : Union[str, Any] = tokenizer.encode_plus(_A , add_special_tokens=_A ) self.assertEqual(_A , _A )
714
"""Convert a BigBird TensorFlow checkpoint to a PyTorch model."""
import argparse

from transformers import (
    BigBirdConfig,
    BigBirdForPreTraining,
    BigBirdForQuestionAnswering,
    load_tf_weights_in_big_bird,
)
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise the PyTorch model from the JSON config
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from the TF checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save the PyTorch model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
502
0
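A hypothetical invocation of the conversion function above; the checkpoint, config, and dump paths are placeholders and must point at real files.

# Placeholder paths for illustration only -- substitute a real TF checkpoint,
# its JSON config, and a writable output directory.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="./bigbird_model.ckpt",
    big_bird_config_file="./big_bird_config.json",
    pytorch_dump_path="./pytorch_dump",
    is_trivia_qa=False,  # set True only for a checkpoint with a TriviaQA head
)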
'''simple docstring''' from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class __lowerCamelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' snake_case__ : int = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) snake_case__ : Union[str, Any] = ( { '''feature-extraction''': TFMobileBertModel, '''fill-mask''': TFMobileBertForMaskedLM, '''question-answering''': TFMobileBertForQuestionAnswering, '''text-classification''': TFMobileBertForSequenceClassification, '''token-classification''': TFMobileBertForTokenClassification, '''zero-shot''': TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) snake_case__ : Dict = False snake_case__ : Optional[int] = False def a_ ( self , a__ , a__ , a__=False ): __SCREAMING_SNAKE_CASE : Tuple = super()._prepare_for_class(a__ , a__ , return_labels=a__ ) if return_labels: if model_class in get_values(a__ ): __SCREAMING_SNAKE_CASE : Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class __lowerCamelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=32 , a__=2 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.02 , a__=3 , a__=4 , a__=None , ): __SCREAMING_SNAKE_CASE : Tuple = parent __SCREAMING_SNAKE_CASE : str = batch_size __SCREAMING_SNAKE_CASE : int = seq_length __SCREAMING_SNAKE_CASE : Any = is_training __SCREAMING_SNAKE_CASE : Optional[Any] = use_input_mask __SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids __SCREAMING_SNAKE_CASE : Dict = use_labels __SCREAMING_SNAKE_CASE : List[Any] = vocab_size __SCREAMING_SNAKE_CASE : str = hidden_size __SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads __SCREAMING_SNAKE_CASE : Any = intermediate_size __SCREAMING_SNAKE_CASE : List[str] = hidden_act __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings __SCREAMING_SNAKE_CASE : int = type_vocab_size __SCREAMING_SNAKE_CASE : Any = type_sequence_label_size __SCREAMING_SNAKE_CASE : List[str] = initializer_range __SCREAMING_SNAKE_CASE : Optional[int] = num_labels __SCREAMING_SNAKE_CASE : Any = num_choices __SCREAMING_SNAKE_CASE : List[str] = scope __SCREAMING_SNAKE_CASE : Optional[int] = embedding_size def a_ ( self ): __SCREAMING_SNAKE_CASE : List[Any] = 
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE : Dict = None if self.use_input_mask: __SCREAMING_SNAKE_CASE : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE : List[str] = None __SCREAMING_SNAKE_CASE : List[Any] = None __SCREAMING_SNAKE_CASE : Any = None if self.use_labels: __SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE : Optional[Any] = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ): __SCREAMING_SNAKE_CASE : List[str] = TFMobileBertModel(config=a__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __SCREAMING_SNAKE_CASE : Union[str, Any] = model(a__ ) __SCREAMING_SNAKE_CASE : Any = [input_ids, input_mask] __SCREAMING_SNAKE_CASE : str = model(a__ ) __SCREAMING_SNAKE_CASE : int = model(a__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ): __SCREAMING_SNAKE_CASE : Tuple = TFMobileBertForMaskedLM(config=a__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __SCREAMING_SNAKE_CASE : Union[str, Any] = model(a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ): __SCREAMING_SNAKE_CASE : str = TFMobileBertForNextSentencePrediction(config=a__ ) __SCREAMING_SNAKE_CASE : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __SCREAMING_SNAKE_CASE : Tuple = model(a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def a_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ): __SCREAMING_SNAKE_CASE : Optional[Any] = TFMobileBertForPreTraining(config=a__ ) __SCREAMING_SNAKE_CASE : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __SCREAMING_SNAKE_CASE : Optional[int] = model(a__ ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def a_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ): __SCREAMING_SNAKE_CASE : 
Any = self.num_labels __SCREAMING_SNAKE_CASE : Optional[int] = TFMobileBertForSequenceClassification(config=a__ ) __SCREAMING_SNAKE_CASE : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __SCREAMING_SNAKE_CASE : Dict = model(a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ): __SCREAMING_SNAKE_CASE : List[Any] = self.num_choices __SCREAMING_SNAKE_CASE : int = TFMobileBertForMultipleChoice(config=a__ ) __SCREAMING_SNAKE_CASE : Dict = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) ) __SCREAMING_SNAKE_CASE : Any = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) ) __SCREAMING_SNAKE_CASE : List[str] = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } __SCREAMING_SNAKE_CASE : Union[str, Any] = model(a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def a_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ): __SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels __SCREAMING_SNAKE_CASE : int = TFMobileBertForTokenClassification(config=a__ ) __SCREAMING_SNAKE_CASE : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __SCREAMING_SNAKE_CASE : Any = model(a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ): __SCREAMING_SNAKE_CASE : Dict = TFMobileBertForQuestionAnswering(config=a__ ) __SCREAMING_SNAKE_CASE : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __SCREAMING_SNAKE_CASE : Optional[Any] = model(a__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a_ ( self ): __SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) : Union[str, Any] = config_and_inputs __SCREAMING_SNAKE_CASE : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict def a_ ( self ): __SCREAMING_SNAKE_CASE : int = TFMobileBertModelTest.TFMobileBertModelTester(self ) __SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=a__ , hidden_size=37 ) def a_ ( self ): self.config_tester.run_common_tests() def a_ ( self ): __SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*a__ ) def a_ ( self ): __SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*a__ ) def a_ ( self ): __SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*a__ ) def a_ ( self ): __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*a__ ) def a_ ( self ): __SCREAMING_SNAKE_CASE : List[str] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*a__ ) def a_ ( self ): __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*a__ ) def a_ ( self ): __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*a__ ) def a_ ( self ): __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*a__ ) @slow def a_ ( self ): # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: __SCREAMING_SNAKE_CASE : Any = TFMobileBertModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) @require_tf class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def a_ ( self ): __SCREAMING_SNAKE_CASE : List[str] = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" ) __SCREAMING_SNAKE_CASE : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) __SCREAMING_SNAKE_CASE : str = model(a__ )[0] __SCREAMING_SNAKE_CASE : Dict = [1, 6, 30522] self.assertEqual(output.shape , a__ ) __SCREAMING_SNAKE_CASE : str = tf.constant( [ [ [-4.5919547, -9.248295, -9.645256], [-6.7306175, -6.440284, -6.6052837], [-7.2743506, -6.7847915, -6.024673], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1e-4 )
211
'''simple docstring''' import comet # From: unbabel-comet import torch import datasets lowercase = datasets.logging.get_logger(__name__) lowercase = '''\ @inproceedings{rei-EtAl:2020:WMT, author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon}, title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task}, booktitle = {Proceedings of the Fifth Conference on Machine Translation}, month = {November}, year = {2020}, address = {Online}, publisher = {Association for Computational Linguistics}, pages = {909--918}, } @inproceedings{rei-etal-2020-comet, title = "{COMET}: A Neural Framework for {MT} Evaluation", author = "Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon", booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.emnlp-main.213", pages = "2685--2702", } ''' lowercase = '''\ Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM). With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition. See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information. ''' lowercase = ''' COMET score. Args: `sources` (list of str): Source sentences `predictions` (list of str): candidate translations `references` (list of str): reference translations `cuda` (bool): If set to True, runs COMET using GPU `show_progress` (bool): Shows progress `model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None. Returns: `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`. `scores`: List of scores. 
Examples: >>> comet_metric = datasets.load_metric(\'comet\') >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."] >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"] >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"] >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source) >>> print([round(v, 2) for v in results["scores"]]) [0.19, 0.92] ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCamelCase ( datasets.Metric ): '''simple docstring''' def a_ ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "sources": datasets.Value("string" , id="sequence" ), "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[ "https://github.com/Unbabel/COMET", "https://www.aclweb.org/anthology/2020.emnlp-main.213/", "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6", ] , ) def a_ ( self , a__ ): if self.config_name == "default": __SCREAMING_SNAKE_CASE : Union[str, Any] = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) ) else: __SCREAMING_SNAKE_CASE : Optional[Any] = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def a_ ( self , a__ , a__ , a__ , a__=None , a__=False ): if gpus is None: __SCREAMING_SNAKE_CASE : List[str] = 1 if torch.cuda.is_available() else 0 __SCREAMING_SNAKE_CASE : Any = {"src": sources, "mt": predictions, "ref": references} __SCREAMING_SNAKE_CASE : int = [dict(zip(a__ , a__ ) ) for t in zip(*data.values() )] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = self.scorer.predict(a__ , gpus=a__ , progress_bar=a__ ) return {"mean_score": mean_score, "scores": scores}
211
1
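As a quick orientation for the MobileBERT tests above, a minimal inference sketch against the same checkpoint the tests themselves load ("google/mobilebert-uncased"); the example sentence is arbitrary.

import tensorflow as tf
from transformers import AutoTokenizer, TFMobileBertModel

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = TFMobileBertModel.from_pretrained("google/mobilebert-uncased")

# Tokenize a sentence and run a forward pass
inputs = tokenizer("Hello world!", return_tensors="tf")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)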
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _snake_case : str = get_tests_dir("fixtures/test_sentencepiece.model") if is_sentencepiece_available(): import sentencepiece as sp _snake_case : List[Any] = 5 _snake_case : Union[str, Any] = 10 @require_sentencepiece @require_tokenizers class a (_lowerCAmelCase , unittest.TestCase ): """simple docstring""" __UpperCAmelCase : str = SpeechaTextTokenizer __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : str = True def __snake_case ( self : Any ) -> Tuple: super().setUp() __snake_case : List[Any] = sp.SentencePieceProcessor() spm_model.Load(lowerCamelCase ) __snake_case : int = ["<s>", "<pad>", "</s>", "<unk>"] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(lowerCamelCase ) )] __snake_case : int = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) ) __snake_case : Tuple = Path(self.tmpdirname ) save_json(lowerCamelCase , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(lowerCamelCase , save_dir / VOCAB_FILES_NAMES["spm_file"] ) __snake_case : Optional[int] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __snake_case ( self : Dict ) -> Optional[int]: __snake_case : Tuple = "<pad>" __snake_case : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase ) def __snake_case ( self : str ) -> str: __snake_case : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(lowerCamelCase ) , 1001 ) def __snake_case ( self : Tuple ) -> List[str]: self.assertEqual(self.get_tokenizer().vocab_size , 1001 ) def __snake_case ( self : Dict ) -> Optional[int]: __snake_case : Union[str, Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) __snake_case : Optional[Any] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [289, 50, 14, 174, 386] , ) __snake_case : Tuple = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( lowerCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , ) __snake_case : Optional[int] = tokenizer.convert_tokens_to_ids(lowerCamelCase ) self.assertListEqual(lowerCamelCase , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) __snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase ) self.assertListEqual( lowerCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , ) @slow def __snake_case ( self : List[Any] ) -> str: # fmt: off __snake_case : List[Any] = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , ) @require_sentencepiece class a (unittest.TestCase ): """simple docstring""" __UpperCAmelCase : int = "valhalla/s2t_mustc_multilinguial_medium" __UpperCAmelCase : str = "C'est trop cool" __UpperCAmelCase : Optional[Any] = "Esto es genial" @classmethod def __snake_case ( cls : List[str] ) -> int: __snake_case : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def __snake_case ( self : Any ) -> Optional[int]: self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 ) self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 ) self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 ) self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 ) def __snake_case ( self : str ) -> List[Any]: self.assertEqual(self.tokenizer.vocab_size , 10000 ) def __snake_case ( self : List[Any] ) -> str: self.assertIn(lowerCamelCase , self.tokenizer.all_special_ids ) __snake_case : Union[str, Any] = [ES_CODE, 4, 1601, 47, 7647, 2] __snake_case : int = self.tokenizer.decode(lowerCamelCase , skip_special_tokens=lowerCamelCase ) __snake_case : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase ) self.assertEqual(lowerCamelCase , lowerCamelCase ) self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase ) def __snake_case ( self : List[Any] ) -> str: __snake_case : Any = "fr" __snake_case : Union[str, Any] = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] , lowerCamelCase ) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id ) def __snake_case ( self : Any ) -> List[Any]: __snake_case : Dict = "fr" self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] ) __snake_case : List[str] = "es" self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
203
import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _snake_case : Optional[int] = "▁" _snake_case : Tuple = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class a (_lowerCAmelCase , unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Optional[Any] = BertGenerationTokenizer __UpperCAmelCase : List[Any] = False __UpperCAmelCase : List[Any] = True def __snake_case ( self : Optional[int] ) -> Optional[int]: super().setUp() __snake_case : Tuple = BertGenerationTokenizer(lowerCamelCase , keep_accents=lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def __snake_case ( self : Dict ) -> int: __snake_case : str = "<s>" __snake_case : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase ) def __snake_case ( self : str ) -> Optional[Any]: __snake_case : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "<pad>" ) self.assertEqual(len(lowerCamelCase ) , 1002 ) def __snake_case ( self : List[str] ) -> Optional[int]: self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def __snake_case ( self : List[str] ) -> Union[str, Any]: __snake_case : List[Any] = BertGenerationTokenizer(lowerCamelCase , keep_accents=lowerCamelCase ) __snake_case : int = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [285, 46, 10, 170, 382] , ) __snake_case : str = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( lowerCamelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) __snake_case : Any = tokenizer.convert_tokens_to_ids(lowerCamelCase ) self.assertListEqual( lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) __snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase ) self.assertListEqual( lowerCamelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def __snake_case ( self : str ) -> List[Any]: return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) @slow def __snake_case ( self : Tuple ) -> Union[str, Any]: __snake_case : Union[str, Any] = "Hello World!" __snake_case : List[str] = [18536, 2260, 101] self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) ) @slow def __snake_case ( self : str ) -> Optional[int]: __snake_case : List[Any] = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) __snake_case : Any = [ 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, ] self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) ) @require_torch @slow def __snake_case ( self : Optional[Any] ) -> str: import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence __snake_case : List[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10] __snake_case : Union[str, Any] = " ".join(lowerCamelCase ) __snake_case : Optional[int] = self.big_tokenizer.encode_plus(lowerCamelCase , return_tensors="pt" , return_token_type_ids=lowerCamelCase ) __snake_case : Optional[Any] = self.big_tokenizer.batch_encode_plus( [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=lowerCamelCase ) __snake_case : List[Any] = BertGenerationConfig() __snake_case : Dict = BertGenerationEncoder(lowerCamelCase ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**lowerCamelCase ) model(**lowerCamelCase ) @slow def __snake_case ( self : List[Any] ) -> List[Any]: # fmt: off __snake_case : Optional[Any] = {"input_ids": [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
203
1
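A small usage sketch for the multilingual speech-translation tokenizer exercised above; the checkpoint id comes from the test itself, and `tgt_lang`, `lang_code_to_id`, and `eos_token_id` are the attributes its assertions rely on.

from transformers import Speech2TextTokenizer

tokenizer = Speech2TextTokenizer.from_pretrained("valhalla/s2t_mustc_multilinguial_medium")
tokenizer.tgt_lang = "fr"  # prefix encodings with the French language code

ids = tokenizer("C'est trop cool").input_ids
assert ids[0] == tokenizer.lang_code_to_id["fr"]  # language code comes first
assert ids[-1] == tokenizer.eos_token_id          # EOS comes last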
'''simple docstring''' import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name def __a(SCREAMING_SNAKE_CASE_ : Union[List, PIL.Image.Image, torch.Tensor] ): '''simple docstring''' warnings.warn( "The preprocess method is deprecated and will be removed in a future version. Please" " use VaeImageProcessor.preprocess instead" , SCREAMING_SNAKE_CASE_ , ) if isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ): return image elif isinstance(SCREAMING_SNAKE_CASE_ , PIL.Image.Image ): _lowerCAmelCase = [image] if isinstance(image[0] , PIL.Image.Image ): _lowerCAmelCase , _lowerCAmelCase = image[0].size _lowerCAmelCase , _lowerCAmelCase = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 _lowerCAmelCase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image] _lowerCAmelCase = np.concatenate(SCREAMING_SNAKE_CASE_ , axis=0 ) _lowerCAmelCase = np.array(SCREAMING_SNAKE_CASE_ ).astype(np.floataa ) / 255.0 _lowerCAmelCase = image.transpose(0 , 3 , 1 , 2 ) _lowerCAmelCase = 2.0 * image - 1.0 _lowerCAmelCase = torch.from_numpy(SCREAMING_SNAKE_CASE_ ) elif isinstance(image[0] , torch.Tensor ): _lowerCAmelCase = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 ) return image def __a(SCREAMING_SNAKE_CASE_ : Union[List, PIL.Image.Image, torch.Tensor] ): '''simple docstring''' if isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ): return mask elif isinstance(SCREAMING_SNAKE_CASE_ , PIL.Image.Image ): _lowerCAmelCase = [mask] if isinstance(mask[0] , PIL.Image.Image ): _lowerCAmelCase , _lowerCAmelCase = mask[0].size _lowerCAmelCase , _lowerCAmelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 _lowerCAmelCase = [np.array(m.convert("L" ).resize((w, h) , resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask] _lowerCAmelCase = np.concatenate(SCREAMING_SNAKE_CASE_ , axis=0 ) _lowerCAmelCase = mask.astype(np.floataa ) / 255.0 _lowerCAmelCase = 0 _lowerCAmelCase = 1 _lowerCAmelCase = torch.from_numpy(SCREAMING_SNAKE_CASE_ ) elif isinstance(mask[0] , torch.Tensor ): _lowerCAmelCase = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 ) return mask class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : UNetaDModel __lowerCamelCase : RePaintScheduler def __init__( self , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: super().__init__() self.register_modules(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase ) @torch.no_grad() def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 250 , _lowerCAmelCase = 0.0 , _lowerCAmelCase = 10 , _lowerCAmelCase = 10 , _lowerCAmelCase = None , _lowerCAmelCase = "pil" , _lowerCAmelCase = True , ) -> Union[ImagePipelineOutput, Tuple]: _lowerCAmelCase = image _lowerCAmelCase = _preprocess_image(_lowerCAmelCase ) _lowerCAmelCase = original_image.to(device=self.device , dtype=self.unet.dtype ) _lowerCAmelCase = _preprocess_mask(_lowerCAmelCase ) _lowerCAmelCase = mask_image.to(device=self.device , dtype=self.unet.dtype ) _lowerCAmelCase = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) != batch_size: raise ValueError( f'''You have passed a list of generators of length 
{len(_lowerCAmelCase )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) _lowerCAmelCase = original_image.shape _lowerCAmelCase = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , self.device ) _lowerCAmelCase = eta _lowerCAmelCase = self.scheduler.timesteps[0] + 1 _lowerCAmelCase = generator[0] if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual _lowerCAmelCase = self.unet(_lowerCAmelCase , _lowerCAmelCase ).sample # compute previous image: x_t -> x_t-1 _lowerCAmelCase = self.scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample else: # compute the reverse: x_t-1 -> x_t _lowerCAmelCase = self.scheduler.undo_step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = t _lowerCAmelCase = (image / 2 + 0.5).clamp(0 , 1 ) _lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _lowerCAmelCase = self.numpy_to_pil(_lowerCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_lowerCAmelCase )
18
"""Compute the arithmetic mean of a list of numbers."""
from __future__ import annotations


def mean(nums: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5, 10, 15, 20, 25, 30, 35])
    20.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
18
1
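Finally, a hedged end-to-end sketch for the RePaint-style inpainting pipeline defined above (before the mean() snippet), assuming it corresponds to diffusers' RePaintPipeline; the model id is the CelebA-HQ DDPM used in the RePaint examples, and the image/mask paths are placeholders.

import PIL.Image
import torch
from diffusers import RePaintPipeline, RePaintScheduler

original = PIL.Image.open("celeba_hq_256.png")  # placeholder 256x256 RGB image
mask = PIL.Image.open("mask_256.png")           # placeholder mask (white = keep, black = inpaint)

scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

result = pipe(
    image=original,
    mask_image=mask,
    num_inference_steps=250,  # defaults mirrored from the __call__ signature above
    eta=0.0,
    jump_length=10,
    jump_n_sample=10,
)
result.images[0].save("inpainted.png")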
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} UpperCamelCase_ = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } UpperCamelCase_ = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } UpperCamelCase_ = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } UpperCamelCase_ = { '''facebook/dpr-ctx_encoder-single-nq-base''': 512, '''facebook/dpr-ctx_encoder-multiset-base''': 512, } UpperCamelCase_ = { '''facebook/dpr-question_encoder-single-nq-base''': 512, '''facebook/dpr-question_encoder-multiset-base''': 512, } UpperCamelCase_ = { '''facebook/dpr-reader-single-nq-base''': 512, '''facebook/dpr-reader-multiset-base''': 512, } UpperCamelCase_ = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } UpperCamelCase_ = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } UpperCamelCase_ = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class _snake_case ( __snake_case ): '''simple docstring''' A__ : Dict = VOCAB_FILES_NAMES A__ : Dict = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP A__ : Dict = 
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : Dict = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION A__ : Optional[Any] = DPRContextEncoderTokenizer class _snake_case ( __snake_case ): '''simple docstring''' A__ : Tuple = VOCAB_FILES_NAMES A__ : str = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP A__ : Optional[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : Dict = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION A__ : Optional[Any] = DPRQuestionEncoderTokenizer UpperCamelCase_ = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) UpperCamelCase_ = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) UpperCamelCase_ = R''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(__snake_case ) class _snake_case : '''simple docstring''' def __call__( self: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Optional[str] = None ,lowerCamelCase_: Optional[str] = None ,lowerCamelCase_: Union[bool, str] = False ,lowerCamelCase_: Union[bool, str] = False ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: Optional[Union[str, TensorType]] = None ,lowerCamelCase_: Optional[bool] = None ,**lowerCamelCase_: Optional[Any] ,) -> BatchEncoding: if titles is None and texts is None: return super().__call__( lowerCamelCase_ ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,max_length=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,**lowerCamelCase_ ,) elif titles is None or texts is None: UpperCAmelCase_ : Tuple = titles if texts is None else texts return super().__call__( lowerCamelCase_ ,lowerCamelCase_ ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,max_length=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,**lowerCamelCase_ ,) UpperCAmelCase_ : Any = titles if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else [titles] UpperCAmelCase_ : Tuple = texts if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else [texts] UpperCAmelCase_ : Optional[Any] = len(lowerCamelCase_ ) UpperCAmelCase_ : int = questions if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else [questions] * n_passages assert len(lowerCamelCase_ ) == len( lowerCamelCase_ ), F'''There should be as many titles than texts but got {len(lowerCamelCase_ )} titles and {len(lowerCamelCase_ )} texts.''' UpperCAmelCase_ : int = super().__call__(lowerCamelCase_ ,lowerCamelCase_ ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ )["""input_ids"""] UpperCAmelCase_ : str = super().__call__(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ )["""input_ids"""] UpperCAmelCase_ : Any = { """input_ids""": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in 
zip(lowerCamelCase_ ,lowerCamelCase_ ) ] } if return_attention_mask is not False: UpperCAmelCase_ : Dict = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) UpperCAmelCase_ : List[str] = attention_mask return self.pad(lowerCamelCase_ ,padding=lowerCamelCase_ ,max_length=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ) def A__ ( self: int ,lowerCamelCase_: BatchEncoding ,lowerCamelCase_: DPRReaderOutput ,lowerCamelCase_: int = 16 ,lowerCamelCase_: int = 64 ,lowerCamelCase_: int = 4 ,) -> List[DPRSpanPrediction]: UpperCAmelCase_ : Optional[int] = reader_input["""input_ids"""] UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = reader_output[:3] UpperCAmelCase_ : Optional[Any] = len(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = sorted(range(lowerCamelCase_ ) ,reverse=lowerCamelCase_ ,key=relevance_logits.__getitem__ ) UpperCAmelCase_ : List[DPRReaderOutput] = [] for doc_id in sorted_docs: UpperCAmelCase_ : List[str] = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence UpperCAmelCase_ : str = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: UpperCAmelCase_ : List[Any] = sequence_ids.index(self.pad_token_id ) else: UpperCAmelCase_ : Optional[int] = len(lowerCamelCase_ ) UpperCAmelCase_ : Dict = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=lowerCamelCase_ ,top_spans=lowerCamelCase_ ,) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=lowerCamelCase_ ,start_index=lowerCamelCase_ ,end_index=lowerCamelCase_ ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) ) if len(lowerCamelCase_ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def A__ ( self: Any ,lowerCamelCase_: List[int] ,lowerCamelCase_: List[int] ,lowerCamelCase_: int ,lowerCamelCase_: int ,) -> List[DPRSpanPrediction]: UpperCAmelCase_ : Union[str, Any] = [] for start_index, start_score in enumerate(lowerCamelCase_ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) UpperCAmelCase_ : Optional[int] = sorted(lowerCamelCase_ ,key=lambda lowerCamelCase_ : x[1] ,reverse=lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = [] for (start_index, end_index), score in scores: assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]''' UpperCAmelCase_ : Any = end_index - start_index + 1 assert length <= max_answer_length, F'''Span is too long: {length} > {max_answer_length}''' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowerCamelCase_ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(__snake_case ) class _snake_case ( __snake_case , __snake_case ): '''simple docstring''' A__ : int = VOCAB_FILES_NAMES A__ : Tuple = READER_PRETRAINED_VOCAB_FILES_MAP A__ : List[Any] = 
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : List[str] = READER_PRETRAINED_INIT_CONFIGURATION A__ : int = ["input_ids", "attention_mask"] A__ : str = DPRReaderTokenizer
322
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel UpperCamelCase_ = HfApi() UpperCamelCase_ = {} # fmt: off UpperCamelCase_ = torch.tensor([ -0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7, 1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9, -1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9, 0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7 ]) UpperCamelCase_ = torch.tensor([ -2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6, 1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8, -2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8, 2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5 ]) UpperCamelCase_ = torch.tensor([ -0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9, -0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4, -0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5, 0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3 ]) UpperCamelCase_ = torch.tensor([ 0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2, -0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9, 0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5, -0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5 ]) UpperCamelCase_ = torch.tensor([ 0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3, -0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5, 0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9, -0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6 ]) UpperCamelCase_ = torch.tensor([ 0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8, -0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0, 0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3, -0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1 ]) UpperCamelCase_ = torch.tensor([ 0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2, -0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8, 0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4, -0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0 ]) UpperCamelCase_ = torch.tensor([ 0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2, -0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0, 0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6, -0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3 ]) UpperCamelCase_ = torch.tensor([ -1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0, 1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3, -2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0, 1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 
2.3_4_1_5, 2.1_2_5_1]) UpperCamelCase_ = torch.tensor([ -1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4, 0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1, -2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9, 1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6 ]) UpperCamelCase_ = torch.tensor([ -1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2, 0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7, -2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1, 1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5 ]) UpperCamelCase_ = torch.tensor([ -2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9, 1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1, -3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1, 3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6 ]) UpperCamelCase_ = torch.tensor([ -2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0, 1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8, -2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5, 2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3 ]) UpperCamelCase_ = torch.tensor([ -2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6, 1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8, -3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0, 3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3 ]) UpperCamelCase_ = torch.tensor([ -1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4, 1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1, -2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9, 1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9 ]) # fmt: on UpperCamelCase_ = api.list_models(filter='''diffusers''') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": UpperCamelCase_ = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1] print(F"Started running {mod.modelId}!!!") if mod.modelId.startswith('''CompVis'''): UpperCamelCase_ = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''') else: UpperCamelCase_ = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) UpperCamelCase_ = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) UpperCamelCase_ = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): UpperCamelCase_ = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3 ) print(F"{mod.modelId} has passed successfully!!!")
322
1
'''simple docstring''' import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class __snake_case ( __UpperCamelCase ,unittest.TestCase): """simple docstring""" lowercase = PriorTransformer lowercase = 'hidden_states' @property def __lowercase ( self : Dict ) -> Any: lowerCAmelCase_ : List[Any] = 4 lowerCAmelCase_ : Optional[int] = 8 lowerCAmelCase_ : Optional[Any] = 7 lowerCAmelCase_ : Union[str, Any] = floats_tensor((batch_size, embedding_dim) ).to(lowerCamelCase ) lowerCAmelCase_ : Optional[int] = floats_tensor((batch_size, embedding_dim) ).to(lowerCamelCase ) lowerCAmelCase_ : int = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCamelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def __lowercase ( self : List[Any] , lowerCamelCase : Tuple=0 ) -> Any: torch.manual_seed(lowerCamelCase ) lowerCAmelCase_ : Optional[int] = 4 lowerCAmelCase_ : Optional[Any] = 8 lowerCAmelCase_ : List[str] = 7 lowerCAmelCase_ : Any = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase ) lowerCAmelCase_ : Tuple = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase ) lowerCAmelCase_ : Dict = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCamelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def __lowercase ( self : Optional[int] ) -> List[Any]: return (4, 8) @property def __lowercase ( self : Optional[Any] ) -> List[str]: return (4, 8) def __lowercase ( self : str ) -> List[Any]: lowerCAmelCase_ : Any = { """num_attention_heads""": 2, """attention_head_dim""": 4, """num_layers""": 2, """embedding_dim""": 8, """num_embeddings""": 7, """additional_embeddings""": 4, } lowerCAmelCase_ : str = self.dummy_input return init_dict, inputs_dict def __lowercase ( self : Any ) -> List[str]: lowerCAmelCase_, lowerCAmelCase_ : Union[str, Any] = PriorTransformer.from_pretrained( """hf-internal-testing/prior-dummy""" , output_loading_info=lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(lowerCamelCase ) lowerCAmelCase_ : List[str] = model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def __lowercase ( self : List[str] ) -> Optional[Any]: lowerCAmelCase_, lowerCAmelCase_ : Union[str, Any] = self.prepare_init_args_and_inputs_for_common() lowerCAmelCase_ : List[Any] = self.model_class(**lowerCamelCase ) lowerCAmelCase_ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase_ : Tuple = [*signature.parameters.keys()] lowerCAmelCase_ : Any = ["""hidden_states""", """timestep"""] self.assertListEqual(arg_names[:2] , lowerCamelCase ) def __lowercase ( self : str ) -> Union[str, Any]: lowerCAmelCase_ : Dict = PriorTransformer.from_pretrained("""hf-internal-testing/prior-dummy""" ) lowerCAmelCase_ : Optional[int] = model.to(lowerCamelCase ) if hasattr(lowerCamelCase , """set_default_attn_processor""" ): model.set_default_attn_processor() lowerCAmelCase_ : Any = 
self.get_dummy_seed_input() with torch.no_grad(): lowerCAmelCase_ : Optional[Any] = model(**lowerCamelCase )[0] lowerCAmelCase_ : int = output[0, :5].flatten().cpu() print(lowerCamelCase ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. lowerCAmelCase_ : str = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] ) self.assertTrue(torch_all_close(lowerCamelCase , lowerCamelCase , rtol=1E-2 ) ) @slow class __snake_case ( unittest.TestCase): """simple docstring""" def __lowercase ( self : Dict , lowerCamelCase : str=1 , lowerCamelCase : List[str]=7_68 , lowerCamelCase : Dict=77 , lowerCamelCase : Dict=0 ) -> Dict: torch.manual_seed(lowerCamelCase ) lowerCAmelCase_ : Tuple = batch_size lowerCAmelCase_ : int = embedding_dim lowerCAmelCase_ : str = num_embeddings lowerCAmelCase_ : Union[str, Any] = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase ) lowerCAmelCase_ : List[str] = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase ) lowerCAmelCase_ : str = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCamelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def __lowercase ( self : List[Any] ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]], [37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]], # fmt: on ] ) def __lowercase ( self : Any , lowerCamelCase : Any , lowerCamelCase : List[Any] ) -> List[Any]: lowerCAmelCase_ : List[str] = PriorTransformer.from_pretrained("""kandinsky-community/kandinsky-2-1-prior""" , subfolder="""prior""" ) model.to(lowerCamelCase ) lowerCAmelCase_ : str = self.get_dummy_seed_input(seed=lowerCamelCase ) with torch.no_grad(): lowerCAmelCase_ : List[str] = model(**lowerCamelCase )[0] assert list(sample.shape ) == [1, 7_68] lowerCAmelCase_ : Optional[Any] = sample[0, :8].flatten().cpu() print(lowerCamelCase ) lowerCAmelCase_ : Optional[int] = torch.tensor(lowerCamelCase ) assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=1E-3 )
275
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# mapping of submodule name -> public names, consumed lazily by _LazyModule below
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
479
0
'''simple docstring'''
import random
import unittest

import torch

from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class __magic_name__(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        # the MPS backend needs a CPU generator; other devices get a device-local one
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
700
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class __magic_name__ (unittest.TestCase ): def __a ( self ) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __a ( self ) -> Dict: lowerCAmelCase_ = 1 lowerCAmelCase_ = 3 lowerCAmelCase_ = (32, 32) lowerCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a ) return image @property def __a ( self ) -> int: torch.manual_seed(0 ) lowerCAmelCase_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) return model @property def __a ( self ) -> Union[str, Any]: torch.manual_seed(0 ) lowerCAmelCase_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def __a ( self ) -> int: torch.manual_seed(0 ) lowerCAmelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(_a ) @property def __a ( self ) -> List[str]: def extract(*_a , **_a ): class __magic_name__ : def __init__( self ) -> List[str]: lowerCAmelCase_ = torch.ones([0] ) def __a ( self , _a ) -> int: self.pixel_values.to(_a ) return self return Out() return extract def __a ( self ) -> Dict: lowerCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase_ = self.dummy_cond_unet lowerCAmelCase_ = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=_a , set_alpha_to_one=_a , ) lowerCAmelCase_ = self.dummy_vae lowerCAmelCase_ = self.dummy_text_encoder lowerCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # make sure here that pndm scheduler skips prk lowerCAmelCase_ = StableDiffusionPipeline( unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , ) lowerCAmelCase_ = sd_pipe.to(_a ) sd_pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = "A painting of a squirrel eating a burger" lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(0 ) lowerCAmelCase_ = sd_pipe([prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" ) lowerCAmelCase_ = output.images lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(0 ) lowerCAmelCase_ = sd_pipe( [prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=_a , )[0] lowerCAmelCase_ = image[0, -3:, -3:, -1] lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase_ = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 
0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase_ = self.dummy_cond_unet lowerCAmelCase_ = PNDMScheduler(skip_prk_steps=_a ) lowerCAmelCase_ = self.dummy_vae lowerCAmelCase_ = self.dummy_text_encoder lowerCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # make sure here that pndm scheduler skips prk lowerCAmelCase_ = StableDiffusionPipeline( unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , ) lowerCAmelCase_ = sd_pipe.to(_a ) sd_pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = "A painting of a squirrel eating a burger" lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(0 ) lowerCAmelCase_ = sd_pipe([prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" ) lowerCAmelCase_ = output.images lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(0 ) lowerCAmelCase_ = sd_pipe( [prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=_a , )[0] lowerCAmelCase_ = image[0, -3:, -3:, -1] lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase_ = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __a ( self ) -> Any: lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=_a ) assert isinstance(_a , _a ) assert isinstance(pipe.scheduler , _a ) assert pipe.safety_checker is None lowerCAmelCase_ = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_a ) lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(_a ) # sanity check that the pipeline still works assert pipe.safety_checker is None lowerCAmelCase_ = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def __a ( self ) -> Any: lowerCAmelCase_ = self.dummy_cond_unet lowerCAmelCase_ = PNDMScheduler(skip_prk_steps=_a ) lowerCAmelCase_ = self.dummy_vae lowerCAmelCase_ = self.dummy_text_encoder lowerCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # put models in fp16 lowerCAmelCase_ = unet.half() lowerCAmelCase_ = vae.half() lowerCAmelCase_ = bert.half() # make sure here that pndm scheduler skips prk lowerCAmelCase_ = StableDiffusionPipeline( unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , ) lowerCAmelCase_ = sd_pipe.to(_a ) sd_pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = "A painting of a squirrel eating a burger" lowerCAmelCase_ = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class __magic_name__ (unittest.TestCase ): def __a ( self ) -> 
Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self ) -> Any: lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=_a ) lowerCAmelCase_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) lowerCAmelCase_ = sd_pipe.to(_a ) sd_pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = ( "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle" " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with" " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and" " children from bahnhof zoo, detailed " ) lowerCAmelCase_ = 4003660346 lowerCAmelCase_ = 7 # without safety guidance (sld_guidance_scale = 0) lowerCAmelCase_ = torch.manual_seed(_a ) lowerCAmelCase_ = sd_pipe( [prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) lowerCAmelCase_ = output.images lowerCAmelCase_ = image[0, -3:, -3:, -1] lowerCAmelCase_ = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) lowerCAmelCase_ = torch.manual_seed(_a ) lowerCAmelCase_ = sd_pipe( [prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) lowerCAmelCase_ = output.images lowerCAmelCase_ = image[0, -3:, -3:, -1] lowerCAmelCase_ = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=_a ) lowerCAmelCase_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) lowerCAmelCase_ = sd_pipe.to(_a ) sd_pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = "padme amidala taking a bath artwork, safe for work, no nudity" lowerCAmelCase_ = 2734971755 lowerCAmelCase_ = 7 lowerCAmelCase_ = torch.manual_seed(_a ) lowerCAmelCase_ = sd_pipe( [prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) lowerCAmelCase_ = output.images lowerCAmelCase_ = image[0, -3:, -3:, -1] lowerCAmelCase_ = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 lowerCAmelCase_ = torch.manual_seed(_a ) lowerCAmelCase_ = sd_pipe( [prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) lowerCAmelCase_ = output.images lowerCAmelCase_ = image[0, -3:, -3:, -1] lowerCAmelCase_ = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice 
).max() < 1E-2 def __a ( self ) -> int: lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" ) lowerCAmelCase_ = sd_pipe.to(_a ) sd_pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = ( "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c." " leyendecker" ) lowerCAmelCase_ = 1044355234 lowerCAmelCase_ = 12 lowerCAmelCase_ = torch.manual_seed(_a ) lowerCAmelCase_ = sd_pipe( [prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) lowerCAmelCase_ = output.images lowerCAmelCase_ = image[0, -3:, -3:, -1] lowerCAmelCase_ = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 lowerCAmelCase_ = torch.manual_seed(_a ) lowerCAmelCase_ = sd_pipe( [prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) lowerCAmelCase_ = output.images lowerCAmelCase_ = image[0, -3:, -3:, -1] lowerCAmelCase_ = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
226
0
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> list: '''simple docstring''' if bit_count < 0: raise ValueError("""The given input must be positive""" ) # get the generated string sequence __UpperCAmelCase : str = gray_code_sequence_string(UpperCAmelCase__ ) # # convert them to integers for i in range(len(UpperCAmelCase__ ) ): __UpperCAmelCase : Union[str, Any] = int(sequence[i] , 2 ) return sequence def lowerCamelCase ( _UpperCamelCase : Tuple ) -> list: '''simple docstring''' if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] __UpperCAmelCase : Dict = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits __UpperCAmelCase : Union[str, Any] = gray_code_sequence_string(bit_count - 1 ) __UpperCAmelCase : Tuple = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): __UpperCAmelCase : Union[str, Any] = '0' + smaller_sequence[i] sequence.append(UpperCAmelCase__ ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): __UpperCAmelCase : str = '1' + smaller_sequence[i] sequence.append(UpperCAmelCase__ ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
139
import argparse
import os
import pickle
import sys

import torch

from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()

# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils


def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--tf_checkpoint_path",
        default="",
        type=str,
        help="An optional path to a TensorFlow checkpoint path to be converted.",
    )
    parser.add_argument(
        "--transfo_xl_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--transfo_xl_dataset_file",
        default="",
        type=str,
        help="An optional dataset file to be converted in a vocabulary.",
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
57
0
'''simple docstring''' import baseaa import io import json import os from copy import deepcopy from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Optional[int] , lowercase__ : Tuple ) ->Union[str, Any]: '''simple docstring''' if isinstance(lowercase__ , lowercase__ ): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden _UpperCamelCase : Union[str, Any] = deepcopy(lowercase__ ) elif os.path.exists(lowercase__ ): with io.open(lowercase__ , "r" , encoding="utf-8" ) as f: _UpperCamelCase : Dict = json.load(lowercase__ ) else: try: _UpperCamelCase : List[str] = baseaa.urlsafe_baadecode(lowercase__ ).decode("utf-8" ) _UpperCamelCase : Union[str, Any] = json.loads(lowercase__ ) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' ) _UpperCamelCase : Union[str, Any] = config self.set_stage_and_offload() def snake_case__ ( self : Optional[Any] ) ->Optional[int]: '''simple docstring''' _UpperCamelCase : List[Any] = self.get_value("zero_optimization.stage" , -1 ) # offload _UpperCamelCase : Tuple = False if self.is_zeroa() or self.is_zeroa(): _UpperCamelCase : Optional[int] = set(["cpu", "nvme"] ) _UpperCamelCase : Tuple = set( [ self.get_value("zero_optimization.offload_optimizer.device" ), self.get_value("zero_optimization.offload_param.device" ), ] ) if len(offload_devices & offload_devices_valid ) > 0: _UpperCamelCase : Tuple = True def snake_case__ ( self : Tuple , lowercase__ : List[Any] ) ->List[Any]: '''simple docstring''' _UpperCamelCase : int = self.config # find the config node of interest if it exists _UpperCamelCase : Optional[int] = ds_key_long.split("." ) _UpperCamelCase : List[str] = nodes.pop() for node in nodes: _UpperCamelCase : Optional[int] = config.get(lowercase__ ) if config is None: return None, ds_key return config, ds_key def snake_case__ ( self : Union[str, Any] , lowercase__ : Optional[Any] , lowercase__ : List[str]=None ) ->List[Any]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase : Dict = self.find_config_node(lowercase__ ) if config is None: return default return config.get(lowercase__ , lowercase__ ) def snake_case__ ( self : str , lowercase__ : Any , lowercase__ : Tuple=False ) ->int: '''simple docstring''' _UpperCamelCase : Dict = self.config # find the config node of interest if it exists _UpperCamelCase : Union[str, Any] = ds_key_long.split("." 
) for node in nodes: _UpperCamelCase : Union[str, Any] = config _UpperCamelCase : Optional[int] = config.get(lowercase__ ) if config is None: if must_exist: raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''' ) else: return # if found remove it if parent_config is not None: parent_config.pop(lowercase__ ) def snake_case__ ( self : List[str] , lowercase__ : int ) ->str: '''simple docstring''' _UpperCamelCase : int = self.get_value(lowercase__ ) return False if value is None else bool(lowercase__ ) def snake_case__ ( self : Optional[Any] , lowercase__ : Optional[Any] ) ->Optional[int]: '''simple docstring''' _UpperCamelCase : Union[str, Any] = self.get_value(lowercase__ ) return False if value is None else not bool(lowercase__ ) def snake_case__ ( self : Dict ) ->Tuple: '''simple docstring''' return self._stage == 2 def snake_case__ ( self : Any ) ->int: '''simple docstring''' return self._stage == 3 def snake_case__ ( self : Union[str, Any] ) ->List[str]: '''simple docstring''' return self._offload class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : List[Any] , lowercase__ : Dict ) ->Optional[int]: '''simple docstring''' _UpperCamelCase : Union[str, Any] = engine def snake_case__ ( self : Optional[Any] , lowercase__ : int , **lowercase__ : Dict ) ->List[Any]: '''simple docstring''' self.engine.backward(lowercase__ , **lowercase__ ) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : Union[str, Any] , lowercase__ : Optional[Any] ) ->Tuple: '''simple docstring''' super().__init__(lowercase__ , device_placement=lowercase__ , scaler=lowercase__ ) _UpperCamelCase : Dict = hasattr(self.optimizer , "overflow" ) def snake_case__ ( self : List[Any] , lowercase__ : Union[str, Any]=None ) ->List[str]: '''simple docstring''' pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def snake_case__ ( self : Tuple ) ->List[str]: '''simple docstring''' pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def snake_case__ ( self : Tuple ) ->List[str]: '''simple docstring''' if self.__has_overflow__: return self.optimizer.overflow return False class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : Any , lowercase__ : List[str] , lowercase__ : List[Any] ) ->Any: '''simple docstring''' super().__init__(lowercase__ , lowercase__ ) def snake_case__ ( self : Union[str, Any] ) ->List[str]: '''simple docstring''' pass # `accelerator.backward(loss)` is doing that automatically. 
Therefore, its implementation is not needed class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Optional[int] , lowercase__ : Optional[int] , lowercase__ : Union[str, Any]=0.0_0_1 , lowercase__ : Tuple=0 , **lowercase__ : Union[str, Any] ) ->List[Any]: '''simple docstring''' _UpperCamelCase : str = params _UpperCamelCase : Any = lr _UpperCamelCase : List[Any] = weight_decay _UpperCamelCase : Optional[int] = kwargs class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Union[str, Any] , lowercase__ : Optional[Any] , lowercase__ : Optional[Any]=None , lowercase__ : Tuple=0 , **lowercase__ : List[Any] ) ->str: '''simple docstring''' _UpperCamelCase : List[Any] = optimizer _UpperCamelCase : List[Any] = total_num_steps _UpperCamelCase : int = warmup_num_steps _UpperCamelCase : str = kwargs
204
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    # register a lazy module so heavy framework imports only happen on first attribute access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
204
1
from collections.abc import Sequence from queue import Queue class _UpperCamelCase: def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Tuple=None ): '''simple docstring''' __a : Tuple = start __a : Dict = end __a : List[str] = val __a : List[Any] = (start + end) // 2 __a : Optional[Any] = left __a : List[str] = right def __repr__( self : Dict ): '''simple docstring''' return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})''' class _UpperCamelCase: def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Sequence , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' __a : Tuple = collection __a : Dict = function if self.collection: __a : int = self._build_tree(0 , len(SCREAMING_SNAKE_CASE__ ) - 1 ) def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' self._update_tree(self.root , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ): '''simple docstring''' return self._query_range(self.root , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict ): '''simple docstring''' if start == end: return SegmentTreeNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.collection[start] ) __a : Tuple = (start + end) // 2 __a : Optional[int] = self._build_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __a : Tuple = self._build_tree(mid + 1 , SCREAMING_SNAKE_CASE__ ) return SegmentTreeNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.fn(left.val , right.val ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' if node.start == i and node.end == i: __a : Optional[Any] = val return if i <= node.mid: self._update_tree(node.left , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: self._update_tree(node.right , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __a : int = self.fn(node.left.val , node.right.val ) def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' if node.start == i and node.end == j: return node.val if i <= node.mid: if j <= node.mid: # range in left child tree return self._query_range(node.left , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left , SCREAMING_SNAKE_CASE__ , node.mid ) , self._query_range(node.right , node.mid + 1 , SCREAMING_SNAKE_CASE__ ) , ) else: # range in right child tree return self._query_range(node.right , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' if self.root is not None: __a : Tuple = Queue() queue.put(self.root ) while not queue.empty(): __a : Tuple = queue.get() yield node if node.left is not None: queue.put(node.left ) if node.right is not None: queue.put(node.right ) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print('''*''' * 50) SCREAMING_SNAKE_CASE__ = SegmentTree([2, 
1, 5, 3, 4], fn)
    for node in arr.traverse():
        print(node)
    print()

    arr.update(1, 5)
    for node in arr.traverse():
        print(node)
    print()

    print(arr.query_range(3, 4))  # 7
    print(arr.query_range(2, 2))  # 5
    print(arr.query_range(1, 3))  # 13
    print()
47
from math import factorial


def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )

    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )

    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
659
0
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch lowercase_ : List[Any] = logging.get_logger(__name__) class UpperCamelCase ( lowerCamelCase__ ): A__ = ["""pixel_values"""] def __init__( self , snake_case__ = True , snake_case__ = None , snake_case__ = PILImageResampling.BILINEAR , snake_case__ = True , snake_case__ = None , snake_case__ = True , snake_case__ = 1 / 255 , snake_case__ = True , snake_case__ = None , snake_case__ = None , **snake_case__ , ): """simple docstring""" super().__init__(**__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[Any] = size if size is not None else {'''shortest_edge''': 256} _SCREAMING_SNAKE_CASE : Any = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} _SCREAMING_SNAKE_CASE : Any = get_size_dict(__lowerCamelCase , param_name="crop_size" ) _SCREAMING_SNAKE_CASE : Dict = do_resize _SCREAMING_SNAKE_CASE : str = size _SCREAMING_SNAKE_CASE : Tuple = resample _SCREAMING_SNAKE_CASE : List[Any] = do_center_crop _SCREAMING_SNAKE_CASE : Dict = crop_size _SCREAMING_SNAKE_CASE : Optional[int] = do_rescale _SCREAMING_SNAKE_CASE : List[str] = rescale_factor _SCREAMING_SNAKE_CASE : List[Any] = do_normalize _SCREAMING_SNAKE_CASE : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _SCREAMING_SNAKE_CASE : str = image_std if image_std is not None else IMAGENET_STANDARD_STD def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ = PILImageResampling.BICUBIC , snake_case__ = None , **snake_case__ , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) _SCREAMING_SNAKE_CASE : str = get_resize_output_image_size(__lowerCamelCase , size=size["shortest_edge"] , default_to_square=__lowerCamelCase ) return resize(__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(__lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}''' ) return center_crop(__lowerCamelCase , size=(size["height"], size["width"]) , data_format=__lowerCamelCase , **__lowerCamelCase ) def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ ): """simple docstring""" return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ): """simple docstring""" return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = ChannelDimension.FIRST , **snake_case__ , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = do_resize if do_resize is not None else self.do_resize _SCREAMING_SNAKE_CASE : Optional[int] = size if size is not None else self.size _SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = resample if resample is not None else self.resample _SCREAMING_SNAKE_CASE : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop _SCREAMING_SNAKE_CASE : Any = crop_size if crop_size is not None else self.crop_size _SCREAMING_SNAKE_CASE : List[str] = get_size_dict(__lowerCamelCase , param_name="crop_size" ) _SCREAMING_SNAKE_CASE : List[str] = do_rescale if do_rescale is not None else self.do_rescale _SCREAMING_SNAKE_CASE : str = rescale_factor if rescale_factor is not None else self.rescale_factor _SCREAMING_SNAKE_CASE : int = do_normalize if do_normalize is not None else self.do_normalize _SCREAMING_SNAKE_CASE : Optional[Any] = image_mean if image_mean is not None else self.image_mean _SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else self.image_std _SCREAMING_SNAKE_CASE : Any = make_list_of_images(__lowerCamelCase ) if not valid_images(__lowerCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
_SCREAMING_SNAKE_CASE : Union[str, Any] = [to_numpy_array(__lowerCamelCase ) for image in images] if do_resize: _SCREAMING_SNAKE_CASE : str = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase ) for image in images] if do_center_crop: _SCREAMING_SNAKE_CASE : List[Any] = [self.center_crop(image=__lowerCamelCase , size=__lowerCamelCase ) for image in images] if do_rescale: _SCREAMING_SNAKE_CASE : Any = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase ) for image in images] if do_normalize: _SCREAMING_SNAKE_CASE : Tuple = [self.normalize(image=__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase ) for image in images] _SCREAMING_SNAKE_CASE : int = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images] _SCREAMING_SNAKE_CASE : int = {'''pixel_values''': images} return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase ) def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ = None ): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(__lowerCamelCase ): _SCREAMING_SNAKE_CASE : str = target_sizes.numpy() _SCREAMING_SNAKE_CASE : Optional[Any] = [] for idx in range(len(__lowerCamelCase ) ): _SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(__lowerCamelCase ) else: _SCREAMING_SNAKE_CASE : Optional[Any] = logits.argmax(dim=1 ) _SCREAMING_SNAKE_CASE : int = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
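# --- usage sketch (not part of the original file) ---------------------------
# A minimal illustration of the preprocessing pipeline above, assuming the
# class is importable; `ImageProcessor` is a stand-in name restored from the
# obfuscated source:
#
#   import numpy as np
#   processor = ImageProcessor()
#   image = np.random.randint(0, 256, size=(3, 300, 400), dtype=np.uint8)
#   batch = processor.preprocess(image, return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224): resize to 256, crop to 224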
702
"""simple docstring""" import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class UpperCamelCase ( __SCREAMING_SNAKE_CASE ): A__ = ["""image_processor""", """tokenizer"""] A__ = """AutoImageProcessor""" A__ = """AutoTokenizer""" def __init__( self , snake_case__=None , snake_case__=None , **snake_case__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : int = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , snake_case__ , ) _SCREAMING_SNAKE_CASE : Any = kwargs.pop("feature_extractor" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(snake_case__ , snake_case__ ) _SCREAMING_SNAKE_CASE : Any = self.image_processor _SCREAMING_SNAKE_CASE : Any = False def __call__( self , *snake_case__ , **snake_case__ ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*snake_case__ , **snake_case__ ) _SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.pop("images" , snake_case__ ) _SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop("text" , snake_case__ ) if len(snake_case__ ) > 0: _SCREAMING_SNAKE_CASE : Optional[Any] = args[0] _SCREAMING_SNAKE_CASE : List[str] = args[1:] if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." ) if images is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor(snake_case__ , *snake_case__ , **snake_case__ ) if text is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(snake_case__ , **snake_case__ ) if text is None: return inputs elif images is None: return encodings else: _SCREAMING_SNAKE_CASE : List[Any] = encodings["input_ids"] return inputs def __SCREAMING_SNAKE_CASE ( self , *snake_case__ , **snake_case__ ): """simple docstring""" return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def __SCREAMING_SNAKE_CASE ( self , *snake_case__ , **snake_case__ ): """simple docstring""" return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @contextmanager def __SCREAMING_SNAKE_CASE ( self ): """simple docstring""" warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your images inputs, or in a separate call." 
) _SCREAMING_SNAKE_CASE : Dict = True _SCREAMING_SNAKE_CASE : str = self.tokenizer yield _SCREAMING_SNAKE_CASE : str = self.image_processor _SCREAMING_SNAKE_CASE : int = False def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__=False , snake_case__=None ): """simple docstring""" if added_vocab is None: _SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer.get_added_vocab() _SCREAMING_SNAKE_CASE : int = {} while tokens: _SCREAMING_SNAKE_CASE : List[Any] = re.search(r"<s_(.*?)>" , snake_case__ , re.IGNORECASE ) if start_token is None: break _SCREAMING_SNAKE_CASE : List[Any] = start_token.group(1 ) _SCREAMING_SNAKE_CASE : Any = re.search(rF'''</s_{key}>''' , snake_case__ , re.IGNORECASE ) _SCREAMING_SNAKE_CASE : Tuple = start_token.group() if end_token is None: _SCREAMING_SNAKE_CASE : Optional[int] = tokens.replace(snake_case__ , "" ) else: _SCREAMING_SNAKE_CASE : Any = end_token.group() _SCREAMING_SNAKE_CASE : Union[str, Any] = re.escape(snake_case__ ) _SCREAMING_SNAKE_CASE : Union[str, Any] = re.escape(snake_case__ ) _SCREAMING_SNAKE_CASE : str = re.search(F'''{start_token_escaped}(.*?){end_token_escaped}''' , snake_case__ , re.IGNORECASE ) if content is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node _SCREAMING_SNAKE_CASE : Dict = self.tokenajson(snake_case__ , is_inner_value=snake_case__ , added_vocab=snake_case__ ) if value: if len(snake_case__ ) == 1: _SCREAMING_SNAKE_CASE : Optional[Any] = value[0] _SCREAMING_SNAKE_CASE : Dict = value else: # leaf nodes _SCREAMING_SNAKE_CASE : Dict = [] for leaf in content.split(r"<sep/>" ): _SCREAMING_SNAKE_CASE : int = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": _SCREAMING_SNAKE_CASE : Optional[Any] = leaf[1:-2] # for categorical special tokens output[key].append(snake_case__ ) if len(output[key] ) == 1: _SCREAMING_SNAKE_CASE : Dict = output[key][0] _SCREAMING_SNAKE_CASE : List[Any] = tokens[tokens.find(snake_case__ ) + len(snake_case__ ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=snake_case__ , added_vocab=snake_case__ ) if len(snake_case__ ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def __SCREAMING_SNAKE_CASE ( self ): """simple docstring""" warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , snake_case__ , ) return self.image_processor_class @property def __SCREAMING_SNAKE_CASE ( self ): """simple docstring""" warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , snake_case__ , ) return self.image_processor
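# --- usage sketch (not part of the original file) ---------------------------
# `token2json` turns tag-delimited generations into nested structures. An
# illustrative mapping only (a real call needs a tokenizer-backed instance):
#
#   "<s_menu><s_name>latte</s_name><s_price>4.50</s_price></s_menu>"
#       -> {"menu": {"name": "latte", "price": "4.50"}}
#
#   "<s_item>a</s_item><sep/><s_item>b</s_item>"
#       -> [{"item": "a"}, {"item": "b"}]   (sibling nodes via the <sep/> branch)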
295
0
"""simple docstring""" # Note: if you intend to run this script make sure you look under scripts/fsmt/ # to locate the appropriate script to do the work correctly. There is a set of scripts to: # - download and prepare data and run the conversion script # - perform eval to get the best hparam into the config # - generate model_cards - useful if you have multiple models from the same paper import argparse import json import os import re from collections import OrderedDict from os.path import basename, dirname import fairseq import torch from fairseq import hub_utils from fairseq.data.dictionary import Dictionary from transformers import FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() lowerCAmelCase__ =2 # based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping` # values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults: # # * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users) # * `early_stopping`: `False` consistently scored better # * `length_penalty` varied, so will assign the best one depending on the model lowerCAmelCase__ ={ # fairseq: "wmt19-ru-en": {"length_penalty": 1.1}, "wmt19-en-ru": {"length_penalty": 1.15}, "wmt19-en-de": {"length_penalty": 1.0}, "wmt19-de-en": {"length_penalty": 1.1}, # allenai: "wmt16-en-de-dist-12-1": {"length_penalty": 0.6}, "wmt16-en-de-dist-6-1": {"length_penalty": 0.6}, "wmt16-en-de-12-1": {"length_penalty": 0.8}, "wmt19-de-en-6-6-base": {"length_penalty": 0.6}, "wmt19-de-en-6-6-big": {"length_penalty": 0.6}, } # this remaps the different models to their organization names lowerCAmelCase__ ={} for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowerCAmelCase__ ="facebook" for m in [ "wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1", "wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big", ]: lowerCAmelCase__ ="allenai" def _a ( UpperCAmelCase__ ) -> Any: # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} __SCREAMING_SNAKE_CASE = dict((re.sub(r'''@@$''' , '''''' , UpperCAmelCase__ ), v) if k.endswith('''@@''' ) else (re.sub(r'''$''' , '''</w>''' , UpperCAmelCase__ ), v) for k, v in d.items() ) __SCREAMING_SNAKE_CASE = '''<s> <pad> </s> <unk>'''.split() # restore the special tokens for k in keep_keys: del da[f"""{k}</w>"""] __SCREAMING_SNAKE_CASE = d[k] # restore return da def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Tuple: # prep assert os.path.exists(UpperCAmelCase__ ) os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ ) print(f"""Writing results to {pytorch_dump_folder_path}""" ) # handle various types of models __SCREAMING_SNAKE_CASE = basename(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = dirname(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel __SCREAMING_SNAKE_CASE = cls.hub_models() __SCREAMING_SNAKE_CASE = {'''bpe''': '''fastbpe''', '''tokenizer''': '''moses'''} __SCREAMING_SNAKE_CASE = '''.''' # note: since the model dump is old, fairseq has upgraded its model some # time later, and it does a whole lot of rewrites and splits on the saved # weights, therefore we can't use 
torch.load() directly on the model file. # see: upgrade_state_dict(state_dict) in fairseq_model.py print(f"""using checkpoint {checkpoint_file}""" ) __SCREAMING_SNAKE_CASE = hub_utils.from_pretrained( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , archive_map=UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = vars(chkpt['''args''']['''model'''] ) __SCREAMING_SNAKE_CASE = args['''source_lang'''] __SCREAMING_SNAKE_CASE = args['''target_lang'''] __SCREAMING_SNAKE_CASE = dirname(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = basename(UpperCAmelCase__ ) # dicts __SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase__ , f"""dict.{src_lang}.txt""" ) __SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase__ , f"""dict.{tgt_lang}.txt""" ) __SCREAMING_SNAKE_CASE = Dictionary.load(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = rewrite_dict_keys(src_dict.indices ) __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase__ , '''vocab-src.json''' ) print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" ) with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ , indent=UpperCAmelCase__ ) ) # detect whether this is a do_lower_case situation, which can be derived by checking whether we # have at least one uppercase letter in the source vocab __SCREAMING_SNAKE_CASE = True for k in src_vocab.keys(): if not k.islower(): __SCREAMING_SNAKE_CASE = False break __SCREAMING_SNAKE_CASE = Dictionary.load(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = rewrite_dict_keys(tgt_dict.indices ) __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase__ , '''vocab-tgt.json''' ) print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" ) with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ , indent=UpperCAmelCase__ ) ) # merges_file (bpecodes) __SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase__ , VOCAB_FILES_NAMES['''merges_file'''] ) for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code" __SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) if os.path.exists(UpperCAmelCase__ ): break with open(UpperCAmelCase__ , encoding='''utf-8''' ) as fin: __SCREAMING_SNAKE_CASE = fin.read() __SCREAMING_SNAKE_CASE = re.sub(r''' \d+$''' , '''''' , UpperCAmelCase__ , 0 , re.M ) # remove frequency number print(f"""Generating {merges_file}""" ) with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' ) as fout: fout.write(UpperCAmelCase__ ) # model config __SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase__ , '''config.json''' ) # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe - # may have to modify the tokenizer if a different type is used by a future model assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args["bpe"]}""" assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args["tokenizer"]}""" __SCREAMING_SNAKE_CASE = { '''architectures''': ['''FSMTForConditionalGeneration'''], '''model_type''': '''fsmt''', '''activation_dropout''': args['''activation_dropout'''], '''activation_function''': '''relu''', '''attention_dropout''': args['''attention_dropout'''], '''d_model''': args['''decoder_embed_dim'''], '''dropout''': args['''dropout'''], '''init_std''': 0.02, '''max_position_embeddings''': 
args['''max_source_positions'''], '''num_hidden_layers''': args['''encoder_layers'''], '''src_vocab_size''': src_vocab_size, '''tgt_vocab_size''': tgt_vocab_size, '''langs''': [src_lang, tgt_lang], '''encoder_attention_heads''': args['''encoder_attention_heads'''], '''encoder_ffn_dim''': args['''encoder_ffn_embed_dim'''], '''encoder_layerdrop''': args['''encoder_layerdrop'''], '''encoder_layers''': args['''encoder_layers'''], '''decoder_attention_heads''': args['''decoder_attention_heads'''], '''decoder_ffn_dim''': args['''decoder_ffn_embed_dim'''], '''decoder_layerdrop''': args['''decoder_layerdrop'''], '''decoder_layers''': args['''decoder_layers'''], '''bos_token_id''': 0, '''pad_token_id''': 1, '''eos_token_id''': 2, '''is_encoder_decoder''': True, '''scale_embedding''': not args['''no_scale_embedding'''], '''tie_word_embeddings''': args['''share_all_embeddings'''], } # good hparam defaults to start with __SCREAMING_SNAKE_CASE = 5 __SCREAMING_SNAKE_CASE = False if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]: __SCREAMING_SNAKE_CASE = best_score_hparams[model_dir]['''length_penalty'''] else: __SCREAMING_SNAKE_CASE = 1.0 print(f"""Generating {fsmt_model_config_file}""" ) with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ , indent=UpperCAmelCase__ ) ) # tokenizer config __SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = { '''langs''': [src_lang, tgt_lang], '''model_max_length''': 10_24, '''do_lower_case''': do_lower_case, } print(f"""Generating {fsmt_tokenizer_config_file}""" ) with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ , indent=UpperCAmelCase__ ) ) # model __SCREAMING_SNAKE_CASE = chkpt['''models'''][0] __SCREAMING_SNAKE_CASE = model.state_dict() # rename keys to start with 'model.' __SCREAMING_SNAKE_CASE = OrderedDict(('''model.''' + k, v) for k, v in model_state_dict.items() ) # remove unneeded keys __SCREAMING_SNAKE_CASE = [ '''model.model''', '''model.encoder.version''', '''model.decoder.version''', '''model.encoder_embed_tokens.weight''', '''model.decoder_embed_tokens.weight''', '''model.encoder.embed_positions._float_tensor''', '''model.decoder.embed_positions._float_tensor''', ] for k in ignore_keys: model_state_dict.pop(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = FSMTConfig.from_pretrained(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = FSMTForConditionalGeneration(UpperCAmelCase__ ) # check that it loads ok model_new.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ ) # save __SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) print(f"""Generating {pytorch_weights_dump_path}""" ) torch.save(UpperCAmelCase__ , UpperCAmelCase__ ) print('''Conversion is done!''' ) print('''\nLast step is to upload the files to s3''' ) print(f"""cd {data_root}""" ) print(f"""transformers-cli upload {model_dir}""" ) if __name__ == "__main__": lowerCAmelCase__ =argparse.ArgumentParser() # Required parameters parser.add_argument( "--fsmt_checkpoint_path", default=None, type=str, required=True, help=( "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts," " bpecodes, etc." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." 
) lowerCAmelCase__ =parser.parse_args() convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
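# --- usage sketch (not part of the original file) ---------------------------
# Hypothetical invocation; the script filename and both paths are placeholders:
#
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path /path/to/wmt19.ru-en/model.pt \
#       --pytorch_dump_folder_path /path/to/dump/wmt19-ru-en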
482
"""simple docstring""" def _a ( UpperCAmelCase__ ) -> int: __SCREAMING_SNAKE_CASE = hex_num.strip() if not hex_num: raise ValueError('''No value was passed to the function''' ) __SCREAMING_SNAKE_CASE = hex_num[0] == '''-''' if is_negative: __SCREAMING_SNAKE_CASE = hex_num[1:] try: __SCREAMING_SNAKE_CASE = int(UpperCAmelCase__ , 16 ) except ValueError: raise ValueError('''Invalid value was passed to the function''' ) __SCREAMING_SNAKE_CASE = '''''' while int_num > 0: __SCREAMING_SNAKE_CASE = str(int_num % 2 ) + bin_str int_num >>= 1 return int(('''-''' + bin_str) if is_negative else bin_str ) if __name__ == "__main__": import doctest doctest.testmod()
482
1
# NOTE: the function name was mangled in the source dump; a descriptive name is
# restored here.
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def apply_forward_hook(method):
    """Decorator that runs the accelerate pre-forward hook (if one is attached) before `method`."""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
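# --- usage sketch (not part of the original file) ---------------------------
# A hypothetical class showing where the decorator would sit; `Encoder` and
# `encode` are made-up names for illustration:
#
#   class Encoder(torch.nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           # the accelerate pre-forward hook (if attached) runs before this body
#           return self.layers(x)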
701
def check_bouncy(n: int) -> bool:
    """A number is bouncy if its digits are neither entirely non-decreasing nor non-increasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number for which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
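# --- usage sketch (not part of the original file) ---------------------------
# check_bouncy flags numbers whose digits neither rise nor fall monotonically:
if __name__ == "__main__":
    print(check_bouncy(155349))  # True  (digits go up and down)
    print(check_bouncy(134468))  # False (non-decreasing)
    print(check_bouncy(66420))   # False (non-increasing)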
697
0
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class a ( unittest.TestCase ): def __snake_case ( self ): UpperCAmelCase__ : List[str] = tempfile.mkdtemp() UpperCAmelCase__ : Union[str, Any] = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '的', '价', '格', '是', '15', '便', 'alex', '##andra', ',', '。', '-', 't', 'shirt', ] UpperCAmelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) UpperCAmelCase__ : int = { 'do_resize': True, 'size': {'height': 224, 'width': 224}, 'do_center_crop': True, 'crop_size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.48145466, 0.4578275, 0.40821073], 'image_std': [0.26862954, 0.26130258, 0.27577711], 'do_convert_rgb': True, } UpperCAmelCase__ : int = os.path.join(self.tmpdirname , UpperCamelCase_ ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(UpperCamelCase_ , UpperCamelCase_ ) def __snake_case ( self , **UpperCamelCase_ ): return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def __snake_case ( self , **UpperCamelCase_ ): return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def __snake_case ( self , **UpperCamelCase_ ): return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def __snake_case ( self ): shutil.rmtree(self.tmpdirname ) def __snake_case ( self ): UpperCAmelCase__ : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] UpperCAmelCase__ : Optional[Any] = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __snake_case ( self ): UpperCAmelCase__ : Dict = self.get_tokenizer() UpperCAmelCase__ : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase__ : Optional[int] = self.get_image_processor() UpperCAmelCase__ : Dict = ChineseCLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) processor_slow.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : Dict = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_ ) UpperCAmelCase__ : Dict = ChineseCLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) processor_fast.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : int = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase_ ) self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase_ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) 
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase_ ) self.assertIsInstance(processor_fast.image_processor , UpperCamelCase_ ) def __snake_case ( self ): UpperCAmelCase__ : int = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : Tuple = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' ) UpperCAmelCase__ : Optional[Any] = self.get_image_processor(do_normalize=UpperCamelCase_ ) UpperCAmelCase__ : Dict = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=UpperCamelCase_ ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase_ ) def __snake_case ( self ): UpperCAmelCase__ : Any = self.get_image_processor() UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : Dict = ChineseCLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) UpperCAmelCase__ : Dict = self.prepare_image_inputs() UpperCAmelCase__ : List[Any] = image_processor(UpperCamelCase_ , return_tensors='np' ) UpperCAmelCase__ : Optional[Any] = processor(images=UpperCamelCase_ , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __snake_case ( self ): UpperCAmelCase__ : str = self.get_image_processor() UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : Optional[Any] = ChineseCLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) UpperCAmelCase__ : Dict = 'Alexandra,T-shirt的价格是15便士。' UpperCAmelCase__ : str = processor(text=UpperCamelCase_ ) UpperCAmelCase__ : Optional[int] = tokenizer(UpperCamelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __snake_case ( self ): UpperCAmelCase__ : int = self.get_image_processor() UpperCAmelCase__ : List[Any] = self.get_tokenizer() UpperCAmelCase__ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) UpperCAmelCase__ : Tuple = 'Alexandra,T-shirt的价格是15便士。' UpperCAmelCase__ : Optional[int] = self.prepare_image_inputs() UpperCAmelCase__ : Any = processor(text=UpperCamelCase_ , images=UpperCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_ ): processor() def __snake_case ( self ): UpperCAmelCase__ : Any = self.get_image_processor() UpperCAmelCase__ : List[Any] = self.get_tokenizer() UpperCAmelCase__ : Tuple = ChineseCLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) UpperCAmelCase__ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCAmelCase__ : str = processor.batch_decode(UpperCamelCase_ ) UpperCAmelCase__ : Tuple = tokenizer.batch_decode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def __snake_case ( self ): UpperCAmelCase__ : List[Any] = self.get_image_processor() UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase__ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=UpperCamelCase_ , 
image_processor=UpperCamelCase_ ) UpperCAmelCase__ : Optional[int] = 'Alexandra,T-shirt的价格是15便士。' UpperCAmelCase__ : Optional[Any] = self.prepare_image_inputs() UpperCAmelCase__ : List[str] = processor(text=UpperCamelCase_ , images=UpperCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
110
# NOTE: class and variable names were mangled in the source dump; they are
# restored here from the `model_type = "xlm-roberta"` declaration and the
# archive map below.
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}


class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
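# --- usage sketch (not part of the original file) ---------------------------
# Default construction reproduces the base-size hyperparameters, and the ONNX
# config exposes dynamic batch/sequence axes for input_ids/attention_mask:
#
#   config = XLMRobertaConfig()
#   config.hidden_size, config.num_hidden_layers   # (768, 12)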
558
0
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]

if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
712
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
435
0
def sylvester(number: int) -> int:
    """Return the n-th term of Sylvester's sequence (2, 3, 7, 43, ...)."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
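# --- usage sketch (not part of the original file) ---------------------------
# The recurrence implemented above is a(n) = a(n-1)^2 - a(n-1) + 1 with a(1) = 2:
if __name__ == "__main__":
    for n in range(1, 5):
        print(n, sylvester(n))  # 1 2, 2 3, 3 7, 4 43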
92
# NOTE: function and class names were mangled in the source dump; descriptive
# names are restored here on a best-effort basis.
"""GLUE benchmark metric."""

from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets


_CITATION = """\
@inproceedings{wang2019glue,
  title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
  author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
  note={In the Proceedings of ICLR.},
  year={2019}
}
"""

_DESCRIPTION = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""

_KWARGS_DESCRIPTION = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "pearson": Pearson Correlation
    "spearmanr": Spearman Correlation
    "matthews_correlation": Matthew Correlation
Examples:

    >>> glue_metric = datasets.load_metric('glue', 'sst2')  # 'sst2' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'mrpc')  # 'mrpc' or 'qqp'
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'stsb')
    >>> references = [0., 1., 2., 3., 4., 5.]
    >>> predictions = [0., 1., 2., 3., 4., 5.]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
    {'pearson': 1.0, 'spearmanr': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'cola')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
443
0
# NOTE: identifiers in this snippet were mangled in the source dump (all test
# methods had collapsed to the same placeholder name); descriptive names are
# restored here on a best-effort basis.
from __future__ import annotations

import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module

import numpy as np

from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTMAEForPreTraining, TFViTMAEModel


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.num_patches = num_patches
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)

            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

    def test_keras_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }

        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    @slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
720
# NOTE: identifiers in this snippet were mangled in the source dump; descriptive
# names are restored here on a best-effort basis.
import argparse
import glob
import logging
import os
import time
from argparse import Namespace

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset

from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors


logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        # We evaluate on the dev set, since the test labels are not public
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
58
0
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging

logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct the model config, either from defaults or from a JSON file
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
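A minimal usage sketch for the converter above; the checkpoint and output paths are hypothetical placeholders, not part of the original script:

convert_gpt2_checkpoint_to_pytorch(
    gpt2_checkpoint_path="checkpoints/gpt2/model.ckpt",  # hypothetical local TF checkpoint
    gpt2_config_file="",                                 # "" falls back to the default GPT2Config()
    pytorch_dump_folder_path="converted/gpt2",           # hypothetical output folder
)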
529
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class UpperCAmelCase_ ( _A ): '''simple docstring''' def __init__( self : Optional[int] , UpperCamelCase__ : Optional[Any]=0.01 , UpperCamelCase__ : Tuple=1000 ) -> str: """simple docstring""" __magic_name__ = p_stop __magic_name__ = max_length def __iter__( self : Union[str, Any] ) -> Dict: """simple docstring""" __magic_name__ = 0 __magic_name__ = False while not stop and count < self.max_length: yield count count += 1 __magic_name__ = random.random() < self.p_stop class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str=False , UpperCamelCase__ : Optional[Any]=True ) -> Dict: """simple docstring""" __magic_name__ = [ BatchSamplerShard(UpperCamelCase__ , 2 , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ ) for i in range(2 ) ] __magic_name__ = [list(UpperCamelCase__ ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(UpperCamelCase__ ) for shard in batch_sampler_shards] , [len(UpperCamelCase__ ) for e in expected] ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : Any ) -> Union[str, Any]: """simple docstring""" __magic_name__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCamelCase__ ) # Expected shouldn't change self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __magic_name__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __magic_name__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
__magic_name__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ ) # Check the shards when the dataset is very small. __magic_name__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [[], []] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : Tuple ) -> int: """simple docstring""" __magic_name__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCamelCase__ ) # Expected shouldn't change self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size. __magic_name__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __magic_name__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ ) # Check the shards when the dataset is very small. 
__magic_name__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [[], []] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ ) def _lowercase ( self : int ) -> str: """simple docstring""" __magic_name__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCamelCase__ ) # Expected shouldn't change self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __magic_name__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __magic_name__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. __magic_name__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ ) # Check the shards when the dataset is very small. 
__magic_name__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [[[0, 1]], []] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [[], []] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ ) def _lowercase ( self : Optional[int] ) -> int: """simple docstring""" __magic_name__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCamelCase__ ) # Expected shouldn't change self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size. __magic_name__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __magic_name__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ ) # Check the shards when the dataset is very small. 
__magic_name__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [[[0, 1]], []] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [[], []] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ ) def _lowercase ( self : Dict ) -> str: """simple docstring""" __magic_name__ = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] __magic_name__ = [BatchSamplerShard(UpperCamelCase__ , 2 , UpperCamelCase__ , even_batches=UpperCamelCase__ ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def _lowercase ( self : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict=False , UpperCamelCase__ : Union[str, Any]=2 , UpperCamelCase__ : Union[str, Any]=False ) -> Union[str, Any]: """simple docstring""" random.seed(UpperCamelCase__ ) __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = [ IterableDatasetShard( UpperCamelCase__ , batch_size=UpperCamelCase__ , drop_last=UpperCamelCase__ , num_processes=UpperCamelCase__ , process_index=UpperCamelCase__ , split_batches=UpperCamelCase__ , ) for i in range(UpperCamelCase__ ) ] __magic_name__ = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. 
random.seed(UpperCamelCase__ ) iterable_dataset_lists.append(list(UpperCamelCase__ ) ) __magic_name__ = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size __magic_name__ = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) self.assertTrue(len(UpperCamelCase__ ) % shard_batch_size == 0 ) __magic_name__ = [] for idx in range(0 , len(UpperCamelCase__ ) , UpperCamelCase__ ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(UpperCamelCase__ ) < len(UpperCamelCase__ ): reference += reference self.assertListEqual(UpperCamelCase__ , reference[: len(UpperCamelCase__ )] ) def _lowercase ( self : Tuple ) -> List[str]: """simple docstring""" __magic_name__ = 42 __magic_name__ = RandomIterableDataset() self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__ ) self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__ ) self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__ ) self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__ ) # Edge case with a very small dataset __magic_name__ = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__ ) self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__ ) self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__ ) self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__ ) def _lowercase ( self : List[Any] ) -> int: """simple docstring""" __magic_name__ = BatchSampler(range(16 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = SkipBatchSampler(UpperCamelCase__ , 2 ) self.assertListEqual(list(UpperCamelCase__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def _lowercase ( self : Tuple ) -> str: """simple docstring""" __magic_name__ = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def _lowercase ( self : str ) -> List[str]: """simple docstring""" __magic_name__ = DataLoader(list(range(16 ) ) , batch_size=4 ) __magic_name__ = skip_first_batches(UpperCamelCase__ , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def _lowercase ( self : List[str] ) -> Tuple: """simple docstring""" __magic_name__ = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(UpperCamelCase__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(UpperCamelCase__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def _lowercase ( self : Tuple ) -> List[str]: """simple docstring""" Accelerator() __magic_name__ = 
DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(UpperCamelCase__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(UpperCamelCase__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
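The suite above pivots on how BatchSamplerShard splits one BatchSampler across processes. A minimal sketch mirroring the first test case's construction and its expected per-process batches:

from torch.utils.data import BatchSampler

from accelerate.data_loader import BatchSamplerShard

# Shard a 24-element sampler across 2 processes, mirroring the test's arguments.
sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
shards = [BatchSamplerShard(sampler, 2, rank, split_batches=False, even_batches=True) for rank in range(2)]

print(list(shards[0]))  # [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]
print(list(shards[1]))  # [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]]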
529
1
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
717
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Finds the Jaccard similarity between two sets (or two lists/tuples).
    With alternative_union=True the denominator is len(set_a) + len(set_b)
    instead of the size of the true union.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
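A quick check against the module's own example sets: the intersection is {c, d, e} and the union is {a, b, c, d, e, f, h, i}, so the similarity is 3/8 = 0.375 (and 3/11 with the alternative union):

set_a = {"a", "b", "c", "d", "e"}
set_b = {"c", "d", "e", "f", "h", "i"}
assert jaccard_similarity(set_a, set_b) == 3 / 8                           # |{c,d,e}| / |union of 8|
assert jaccard_similarity(set_a, set_b, alternative_union=True) == 3 / 11  # 3 / (5 + 6)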
547
0
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise the PyTorch model from the config
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
511
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowerCAmelCase : str = logging.get_logger(__name__) lowerCAmelCase : int = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class _A ( __magic_name__): SCREAMING_SNAKE_CASE : int = '''gpt_neo''' SCREAMING_SNAKE_CASE : Tuple = ['''past_key_values'''] SCREAMING_SNAKE_CASE : List[str] = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , _SCREAMING_SNAKE_CASE=5_0257 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=24 , _SCREAMING_SNAKE_CASE=[[["global", "local"], 12]] , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE="gelu_new" , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=5_0256 , _SCREAMING_SNAKE_CASE=5_0256 , **_SCREAMING_SNAKE_CASE , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = vocab_size SCREAMING_SNAKE_CASE_ : Any = max_position_embeddings SCREAMING_SNAKE_CASE_ : str = hidden_size SCREAMING_SNAKE_CASE_ : List[Any] = num_layers SCREAMING_SNAKE_CASE_ : List[str] = num_heads SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size SCREAMING_SNAKE_CASE_ : Optional[int] = window_size SCREAMING_SNAKE_CASE_ : int = activation_function SCREAMING_SNAKE_CASE_ : Union[str, Any] = resid_dropout SCREAMING_SNAKE_CASE_ : Optional[Any] = embed_dropout SCREAMING_SNAKE_CASE_ : int = attention_dropout SCREAMING_SNAKE_CASE_ : int = classifier_dropout SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm_epsilon SCREAMING_SNAKE_CASE_ : Dict = initializer_range SCREAMING_SNAKE_CASE_ : List[Any] = use_cache SCREAMING_SNAKE_CASE_ : List[Any] = bos_token_id SCREAMING_SNAKE_CASE_ : Union[str, Any] = eos_token_id SCREAMING_SNAKE_CASE_ : Union[str, Any] = attention_types SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.expand_attention_types_params(_SCREAMING_SNAKE_CASE ) if len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' f"but is `len(config.attention_layers) = {len(self.attention_layers )}`, " f"`config.num_layers = {self.num_layers}`. " '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' 
) super().__init__(bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @staticmethod def UpperCAmelCase ( _SCREAMING_SNAKE_CASE ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def A_ ( a , a , a , a ): """simple docstring""" import torch SCREAMING_SNAKE_CASE_ : List[Any] = input.size() SCREAMING_SNAKE_CASE_ : str = len(a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = shape[dimension] SCREAMING_SNAKE_CASE_ : List[Any] = torch.arange(0 , a , a ) SCREAMING_SNAKE_CASE_ : List[str] = torch.div(sizedim - size , a , rounding_mode='floor' ) + 1 SCREAMING_SNAKE_CASE_ : Tuple = torch.arange(a ) + low_indices[:min_length][:, None] SCREAMING_SNAKE_CASE_ : Union[str, Any] = [slice(a )] * rank SCREAMING_SNAKE_CASE_ : int = indices SCREAMING_SNAKE_CASE_ : int = input[s] SCREAMING_SNAKE_CASE_ : List[str] = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(a ) def A_ ( a , a ): """simple docstring""" import torch SCREAMING_SNAKE_CASE_ : str = torch.arange(1 , a ) SCREAMING_SNAKE_CASE_ : List[str] = torch.remainder(a , a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = remainders == 0 SCREAMING_SNAKE_CASE_ : Any = candidates[divisor_indices] SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.max(a ) return largest_divisor, torch.div(a , a , rounding_mode='floor' ) class _A ( __magic_name__): @property def UpperCAmelCase ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(_SCREAMING_SNAKE_CASE , direction='inputs' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = {0: 'batch', 1: 'past_sequence + sequence'} else: SCREAMING_SNAKE_CASE_ : List[Any] = {0: 'batch', 1: 'sequence'} return common_inputs @property def UpperCAmelCase ( self ): """simple docstring""" return self._config.num_heads def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = super(_SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs( _SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , is_pair=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() SCREAMING_SNAKE_CASE_ : int = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' 
) else: import torch SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = common_inputs['input_ids'].shape # Not using the same length for past_key_values SCREAMING_SNAKE_CASE_ : Any = seqlen + 2 SCREAMING_SNAKE_CASE_ : Optional[int] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) SCREAMING_SNAKE_CASE_ : List[Any] = [ (torch.zeros(_SCREAMING_SNAKE_CASE ), torch.zeros(_SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] SCREAMING_SNAKE_CASE_ : Tuple = common_inputs['attention_mask'] if self.use_past: SCREAMING_SNAKE_CASE_ : Any = ordered_inputs['attention_mask'].dtype SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat( [ordered_inputs['attention_mask'], torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )] , dim=1 ) return ordered_inputs @property def UpperCAmelCase ( self ): """simple docstring""" return 13
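The expand_attention_types_params helper above turns the compact attention_types spec into one entry per layer by repeating each pattern count times. A standalone restatement of that expansion for the default spec, with no config class required:

# Default spec: the ["global", "local"] pattern repeated 12 times -> 24 layers.
attention_types = [[["global", "local"], 12]]

attentions = []
for pattern, count in attention_types:
    for _ in range(count):
        attentions.extend(pattern)

assert len(attentions) == 24  # matches the default num_layers
assert attentions[:4] == ["global", "local", "global", "local"]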
511
1
"""Topological sort of a directed acyclic graph via depth-first search."""

edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
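Tracing the example graph, the function returns the post-order ['c', 'd', 'e', 'b', 'a']; because each vertex is appended only after its neighbors, reading the list right-to-left yields a valid topological order:

result = topological_sort("a", [], [])
print(result)        # ['c', 'd', 'e', 'b', 'a'] -- dependencies first
print(result[::-1])  # ['a', 'b', 'e', 'd', 'c'] -- every edge points left-to-right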
517
'''simple docstring''' import itertools import random import unittest import numpy as np from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor from transformers.testing_utils import require_torch, slow from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin _SCREAMING_SNAKE_CASE = random.Random() def __lowerCamelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any]=1.0 , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Optional[int]=None ) -> Optional[Any]: if rng is None: snake_case = global_rng snake_case = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : int , __snake_case : int , __snake_case : Dict=7 , __snake_case : Optional[int]=4_00 , __snake_case : Optional[int]=20_00 , __snake_case : List[str]=1 , __snake_case : str=0.0 , __snake_case : Dict=1_60_00 , __snake_case : Dict=True , __snake_case : Optional[int]=True , )-> Optional[int]: snake_case = parent snake_case = batch_size snake_case = min_seq_length snake_case = max_seq_length snake_case = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) snake_case = feature_size snake_case = padding_value snake_case = sampling_rate snake_case = return_attention_mask snake_case = do_normalize def lowerCAmelCase ( self : Union[str, Any] )-> Dict: return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowerCAmelCase ( self : Tuple , __snake_case : List[Any]=False , __snake_case : int=False )-> Tuple: def _flatten(__snake_case : List[str] ): return list(itertools.chain(*__snake_case ) ) if equal_length: snake_case = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size snake_case = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: snake_case = [np.asarray(__snake_case ) for x in speech_inputs] return speech_inputs class _lowerCAmelCase ( A__ , unittest.TestCase ): """simple docstring""" snake_case_ = WavaVecaFeatureExtractor def lowerCAmelCase ( self : Tuple )-> Optional[Any]: snake_case = WavaVecaFeatureExtractionTester(self ) def lowerCAmelCase ( self : Dict , __snake_case : str )-> List[Any]: self.assertTrue(np.all(np.mean(__snake_case , axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(__snake_case , axis=0 ) - 1 ) < 1e-3 ) ) def lowerCAmelCase ( self : Optional[Any] )-> Optional[int]: # Tests that all call wrap to encode_plus and batch_encode_plus snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 snake_case = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] snake_case = [np.asarray(__snake_case ) for speech_input in speech_inputs] # Test not batched input snake_case = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values snake_case = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values self.assertTrue(np.allclose(__snake_case , __snake_case , atol=1e-3 ) ) # Test batched snake_case = feat_extract(__snake_case , return_tensors="""np""" ).input_values snake_case = 
feat_extract(__snake_case , return_tensors="""np""" ).input_values for enc_seq_a, enc_seq_a in zip(__snake_case , __snake_case ): self.assertTrue(np.allclose(__snake_case , __snake_case , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. snake_case = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] snake_case = np.asarray(__snake_case ) snake_case = feat_extract(__snake_case , return_tensors="""np""" ).input_values snake_case = feat_extract(__snake_case , return_tensors="""np""" ).input_values for enc_seq_a, enc_seq_a in zip(__snake_case , __snake_case ): self.assertTrue(np.allclose(__snake_case , __snake_case , atol=1e-3 ) ) def lowerCAmelCase ( self : int )-> List[str]: snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] snake_case = ["""longest""", """max_length""", """do_not_pad"""] snake_case = [None, 16_00, None] for max_length, padding in zip(__snake_case , __snake_case ): snake_case = feat_extract(__snake_case , padding=__snake_case , max_length=__snake_case , return_tensors="""np""" ) snake_case = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:8_00] ) self.assertTrue(input_values[0][8_00:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[1][:10_00] ) self.assertTrue(input_values[0][10_00:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[2][:12_00] ) def lowerCAmelCase ( self : Any )-> Tuple: snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case = range(8_00 , 14_00 , 2_00 ) snake_case = [floats_list((1, x) )[0] for x in lengths] snake_case = ["""longest""", """max_length""", """do_not_pad"""] snake_case = [None, 16_00, None] for max_length, padding in zip(__snake_case , __snake_case ): snake_case = feat_extract(__snake_case , max_length=__snake_case , padding=__snake_case ) snake_case = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:8_00] ) self._check_zero_mean_unit_variance(input_values[1][:10_00] ) self._check_zero_mean_unit_variance(input_values[2][:12_00] ) def lowerCAmelCase ( self : int )-> str: snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] snake_case = feat_extract( __snake_case , truncation=__snake_case , max_length=10_00 , padding="""max_length""" , return_tensors="""np""" ) snake_case = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :8_00] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def lowerCAmelCase ( self : int )-> str: snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] snake_case = feat_extract( __snake_case , truncation=__snake_case , max_length=10_00 , padding="""longest""" , return_tensors="""np""" ) snake_case = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :8_00] ) self._check_zero_mean_unit_variance(input_values[1, :10_00] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 10_00) ) snake_case = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] snake_case = feat_extract( __snake_case , 
truncation=__snake_case , max_length=20_00 , padding="""longest""" , return_tensors="""np""" ) snake_case = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :8_00] ) self._check_zero_mean_unit_variance(input_values[1, :10_00] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 12_00) ) @require_torch def lowerCAmelCase ( self : List[str] )-> Union[str, Any]: import torch snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case = np.random.rand(1_00 ).astype(np.floataa ) snake_case = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: snake_case = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) snake_case = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) @slow @require_torch def lowerCAmelCase ( self : str )-> List[Any]: # this test makes sure that models that are using # group norm don't have their feature extractor return the # attention_mask for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST: snake_case = WavaVecaConfig.from_pretrained(__snake_case ) snake_case = WavaVecaFeatureExtractor.from_pretrained(__snake_case ) # only "layer" feature extraction norm should make use of # attention_mask self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == """layer""" )
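The _check_zero_mean_unit_variance assertions above pin down what do_normalize is expected to produce. A standalone sketch of the same normalization and check; the 1e-7 stabilizing epsilon is an assumption for illustration, not taken from the extractor's source:

import numpy as np

raw = np.random.rand(800).astype(np.float32)
# Per-utterance zero-mean, unit-variance normalization (epsilon is an assumption).
normed = (raw - raw.mean()) / np.sqrt(raw.var() + 1e-7)

assert abs(normed.mean()) < 1e-3
assert abs(normed.var() - 1) < 1e-3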
517
1
import sys


def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
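For the chain of dimensions [30, 35, 15, 5, 10, 20, 25] (the classic CLRS example), the minimum cost is 15125 scalar multiplications with parenthesization ((A1 (A2 A3)) ((A4 A5) A6)):

matrix, sol = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
print(matrix[1][6])                # 15125
print_optimal_solution(sol, 1, 6)  # ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) )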
60
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class UpperCAmelCase_ ( _A ): '''simple docstring''' def __init__( self : Optional[int] , UpperCamelCase__ : Optional[Any]=0.01 , UpperCamelCase__ : Tuple=1000 ) -> str: """simple docstring""" __magic_name__ = p_stop __magic_name__ = max_length def __iter__( self : Union[str, Any] ) -> Dict: """simple docstring""" __magic_name__ = 0 __magic_name__ = False while not stop and count < self.max_length: yield count count += 1 __magic_name__ = random.random() < self.p_stop class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str=False , UpperCamelCase__ : Optional[Any]=True ) -> Dict: """simple docstring""" __magic_name__ = [ BatchSamplerShard(UpperCamelCase__ , 2 , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ ) for i in range(2 ) ] __magic_name__ = [list(UpperCamelCase__ ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(UpperCamelCase__ ) for shard in batch_sampler_shards] , [len(UpperCamelCase__ ) for e in expected] ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : Any ) -> Union[str, Any]: """simple docstring""" __magic_name__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCamelCase__ ) # Expected shouldn't change self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __magic_name__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __magic_name__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
__magic_name__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ ) # Check the shards when the dataset is very small. __magic_name__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [[], []] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : Tuple ) -> int: """simple docstring""" __magic_name__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCamelCase__ ) # Expected shouldn't change self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size. __magic_name__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __magic_name__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ ) # Check the shards when the dataset is very small. 
__magic_name__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [[], []] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ ) def _lowercase ( self : int ) -> str: """simple docstring""" __magic_name__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCamelCase__ ) # Expected shouldn't change self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __magic_name__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __magic_name__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. __magic_name__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ ) # Check the shards when the dataset is very small. 
__magic_name__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [[[0, 1]], []] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCamelCase__ ) __magic_name__ = [[], []] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__ ) def _lowercase ( self : Optional[int] ) -> int: """simple docstring""" __magic_name__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCamelCase__ ) # Expected shouldn't change self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size. __magic_name__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __magic_name__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ ) __magic_name__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCamelCase__ ) __magic_name__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__ ) # Check the shards when the dataset is very small. 
batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])

    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        reference_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(reference_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(reference_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])

    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
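The tests above pin down the resumption behaviour: skipping two batches of size 4 over range(16) must yield [8..11] and [12..15]. A minimal usage sketch of the same idea outside a test class, assuming `accelerate` and `torch` are installed:

from torch.utils.data import DataLoader

from accelerate.data_loader import SkipDataLoader, skip_first_batches

# Plain dataloader over 16 integers: 4 batches of 4.
dataloader = DataLoader(list(range(16)), batch_size=4)

# Wrap it so the first two batches are skipped, e.g. when resuming mid-epoch.
resumed = skip_first_batches(dataloader, num_batches=2)
print([t.tolist() for t in resumed])  # [[8, 9, 10, 11], [12, 13, 14, 15]]

# SkipDataLoader builds the same behaviour directly into the loader.
skip_loader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
print([t.tolist() for t in skip_loader])  # [[8, 9, 10, 11], [12, 13, 14, 15]]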
529
0
'''simple docstring''' import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('0.12.2'): raise Exception('requires fairseq >= 0.12.2') if version.parse(fairseq.__version__) > version.parse('2'): raise Exception('requires fairseq < v2') logging.set_verbosity_info() __magic_name__ = logging.get_logger(__name__) __magic_name__ = 'Hello, World!' __magic_name__ = 'en_XX' def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : bool): A_ : Dict = Path("""data_bin""") A_ : Union[str, Any] = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(_lowerCAmelCase).parent) , checkpoint_file=Path(_lowerCAmelCase).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(_lowerCAmelCase) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(_lowerCAmelCase).parent / """sentencepiece.bpe.model""") , src_dict=str(data_dir / """dict.txt""") , ) xmod.eval() # disable dropout print(_lowerCAmelCase) A_ : Tuple = xmod.model.encoder.sentence_encoder A_ : List[str] = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: A_ : List[Any] = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0] print("""Our X-MOD config:""" , _lowerCAmelCase) A_ : Optional[Any] = XmodForSequenceClassification(_lowerCAmelCase) if classification_head else XmodForMaskedLM(_lowerCAmelCase) model.eval() # Now let's copy all the weights. # Embeddings A_ : str = xmod_sent_encoder.embed_tokens.weight A_ : List[Any] = xmod_sent_encoder.embed_positions.weight A_ : Optional[int] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c xmod doesn't use them. 
model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")
        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
A_ : Any = xmod.encode(_lowerCAmelCase).unsqueeze(0) # batch of size 1 model.roberta.set_default_language(_lowerCAmelCase) A_ : int = model(_lowerCAmelCase)[0] if classification_head: A_ : List[str] = xmod.model.classification_heads["mnli"](xmod.extract_features(_lowerCAmelCase)) else: A_ : str = xmod.model(_lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE])[0] print(our_output.shape , their_output.shape) A_ : Dict = torch.max(torch.abs(our_output - their_output)).item() print(F'max_absolute_diff = {max_absolute_diff}') # ~ 1e-7 A_ : Dict = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3) print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""") if not success: raise Exception("""Something went wRoNg""") Path(_lowerCAmelCase).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase) print(F'Saving model to {pytorch_dump_folder_path}') model.save_pretrained(_lowerCAmelCase) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--classification_head', action='store_true', help='Whether to convert a final classification head.' ) __magic_name__ = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
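After conversion, the checkpoint should load through the regular `transformers` API. A small usage sketch, assuming the dump folder above was `converted_xmod` (the local path is hypothetical; the tokenizer repo `facebook/xmod-base` is the published upstream checkpoint):

import torch

from transformers import AutoTokenizer, XmodForMaskedLM

model = XmodForMaskedLM.from_pretrained("converted_xmod")  # hypothetical output dir
model.roberta.set_default_language("en_XX")  # same call the script uses during validation

tokenizer = AutoTokenizer.from_pretrained("facebook/xmod-base")
inputs = tokenizer("Hello, World!", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)  # (1, sequence_length, vocab_size)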
711
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __magic_name__ = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""pixel_values"""] def __init__( self : Optional[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = True ,**_a : Dict ,): '''simple docstring''' super().__init__(**_a ) A_ : Tuple = size if size is not None else {"""shortest_edge""": 224} A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ) A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ,param_name="""crop_size""" ) A_ : Any = do_resize A_ : List[str] = size A_ : Union[str, Any] = resample A_ : Dict = do_center_crop A_ : List[str] = crop_size A_ : Any = do_rescale A_ : Union[str, Any] = rescale_factor A_ : Any = do_normalize A_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD A_ : Tuple = do_convert_rgb def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[Any] ,): '''simple docstring''' A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) A_ : Tuple = get_resize_output_image_size(_a ,size=size["""shortest_edge"""] ,default_to_square=_a ) return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a ) def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[int] ,): '''simple docstring''' A_ : Optional[int] = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a ) def _a ( self : Any ,_a : np.ndarray ,_a : Union[int, float] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Any ,): '''simple docstring''' return rescale(_a ,scale=_a ,data_format=_a ,**_a ) def _a ( self : Any ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,): '''simple docstring''' return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a ) def _a ( self : Optional[Any] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : int = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,**_a : int ,): '''simple docstring''' A_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize A_ : Tuple = size if size is not None else self.size A_ : Optional[int] = get_size_dict(_a ,param_name="""size""" ,default_to_square=_a ) A_ : List[str] = resample if resample is not None else self.resample A_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop A_ : Any = crop_size if crop_size is not None else self.crop_size A_ : int = get_size_dict(_a ,param_name="""crop_size""" ,default_to_square=_a ) A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor A_ : Any = do_normalize if do_normalize is not None else self.do_normalize A_ : int = image_mean if image_mean is not None else self.image_mean A_ : int = image_std if image_std is not None else self.image_std A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb A_ : int = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: A_ : Optional[int] = [convert_to_rgb(_a ) for image in images] # All transformations expect numpy arrays. A_ : Dict = [to_numpy_array(_a ) for image in images] if do_resize: A_ : int = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images] if do_center_crop: A_ : Tuple = [self.center_crop(image=_a ,size=_a ) for image in images] if do_rescale: A_ : List[str] = [self.rescale(image=_a ,scale=_a ) for image in images] if do_normalize: A_ : Any = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images] A_ : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images] A_ : List[str] = {"""pixel_values""": images} return BatchFeature(data=_a ,tensor_type=_a )
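The `{"shortest_edge": 224}` convention above scales the short side of the image to a fixed length while keeping the aspect ratio; `get_resize_output_image_size` implements this inside the library. A rough standalone equivalent of just the size computation (not the library function itself):

def shortest_edge_output_size(height, width, shortest_edge):
    # Scale (height, width) so the shorter side equals `shortest_edge`,
    # preserving the aspect ratio of the longer side.
    short, long = (height, width) if height <= width else (width, height)
    new_short, new_long = shortest_edge, int(long * shortest_edge / short)
    return (new_short, new_long) if height <= width else (new_long, new_short)

print(shortest_edge_output_size(480, 640, 224))  # (224, 298)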
27
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCAmelCase : Optional[int] = logging.get_logger(__name__) _UpperCAmelCase : Tuple = { "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json", # See all LeViT models at https://huggingface.co/models?filter=levit } class lowerCAmelCase_ ( snake_case__ ): UpperCamelCase_ :str = 'levit' def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int]=224 , SCREAMING_SNAKE_CASE_ : Any=3 , SCREAMING_SNAKE_CASE_ : int=3 , SCREAMING_SNAKE_CASE_ : int=2 , SCREAMING_SNAKE_CASE_ : int=1 , SCREAMING_SNAKE_CASE_ : Dict=16 , SCREAMING_SNAKE_CASE_ : Optional[Any]=[128, 256, 384] , SCREAMING_SNAKE_CASE_ : str=[4, 8, 12] , SCREAMING_SNAKE_CASE_ : Any=[4, 4, 4] , SCREAMING_SNAKE_CASE_ : List[Any]=[16, 16, 16] , SCREAMING_SNAKE_CASE_ : Optional[Any]=0 , SCREAMING_SNAKE_CASE_ : int=[2, 2, 2] , SCREAMING_SNAKE_CASE_ : Optional[Any]=[2, 2, 2] , SCREAMING_SNAKE_CASE_ : List[Any]=0.02 , **SCREAMING_SNAKE_CASE_ : Tuple , ): super().__init__(**SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ = image_size lowerCAmelCase__ = num_channels lowerCAmelCase__ = kernel_size lowerCAmelCase__ = stride lowerCAmelCase__ = padding lowerCAmelCase__ = hidden_sizes lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = depths lowerCAmelCase__ = key_dim lowerCAmelCase__ = drop_path_rate lowerCAmelCase__ = patch_size lowerCAmelCase__ = attention_ratio lowerCAmelCase__ = mlp_ratio lowerCAmelCase__ = initializer_range lowerCAmelCase__ = [ ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class lowerCAmelCase_ ( snake_case__ ): UpperCamelCase_ :List[Any] = version.parse('1.11' ) @property def __snake_case ( self : Any ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def __snake_case ( self : List[Any] ): return 1e-4
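For reference, the un-mangled class in `transformers` is `LevitConfig`, and the defaults above describe the LeViT-128S layout; the trailing `1e-4` is presumably the ONNX export's validation tolerance (`atol_for_validation` on `OnnxConfig`). A quick instantiation sketch:

from transformers import LevitConfig, LevitModel

config = LevitConfig(
    image_size=224,
    hidden_sizes=[128, 256, 384],
    num_attention_heads=[4, 8, 12],
    depths=[4, 4, 4],
)
model = LevitModel(config)
print(sum(p.numel() for p in model.parameters()))  # parameter count of the backbone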
668
from typing import Any def lowerCAmelCase_ (lowercase__ : list , lowercase__ : list , lowercase__ : dict , lowercase__ : dict , lowercase__ : dict , ) -> list: '''simple docstring''' _validation( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) # Creates data structures and fill initial step lowerCAmelCase__ = {} lowerCAmelCase__ = {} for state in states_space: lowerCAmelCase__ = observations_space[0] lowerCAmelCase__ = ( initial_probabilities[state] * emission_probabilities[state][observation] ) lowerCAmelCase__ = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(lowercase__ ) ): lowerCAmelCase__ = observations_space[o] lowerCAmelCase__ = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function lowerCAmelCase__ = '''''' lowerCAmelCase__ = -1 for k_state in states_space: lowerCAmelCase__ = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: lowerCAmelCase__ = probability lowerCAmelCase__ = k_state # Update probabilities and pointers dicts lowerCAmelCase__ = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) lowerCAmelCase__ = arg_max # The final observation lowerCAmelCase__ = observations_space[len(lowercase__ ) - 1] # argmax for given final observation lowerCAmelCase__ = '''''' lowerCAmelCase__ = -1 for k_state in states_space: lowerCAmelCase__ = probabilities[(k_state, final_observation)] if probability > max_probability: lowerCAmelCase__ = probability lowerCAmelCase__ = k_state lowerCAmelCase__ = arg_max # Process pointers backwards lowerCAmelCase__ = last_state lowerCAmelCase__ = [] for o in range(len(lowercase__ ) - 1 , -1 , -1 ): result.append(lowercase__ ) lowerCAmelCase__ = pointers[previous, observations_space[o]] result.reverse() return result def lowerCAmelCase_ (lowercase__ : Any , lowercase__ : Any , lowercase__ : Any , lowercase__ : Any , lowercase__ : Any , ) -> None: '''simple docstring''' _validate_not_empty( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) _validate_lists(lowercase__ , lowercase__ ) _validate_dicts( lowercase__ , lowercase__ , lowercase__ ) def lowerCAmelCase_ (lowercase__ : Any , lowercase__ : Any , lowercase__ : Any , lowercase__ : Any , lowercase__ : Any , ) -> None: '''simple docstring''' if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError('''There\'s an empty parameter''' ) def lowerCAmelCase_ (lowercase__ : Any , lowercase__ : Any ) -> None: '''simple docstring''' _validate_list(lowercase__ , '''observations_space''' ) _validate_list(lowercase__ , '''states_space''' ) def lowerCAmelCase_ (lowercase__ : Any , lowercase__ : str ) -> None: '''simple docstring''' if not isinstance(_object , lowercase__ ): lowerCAmelCase__ = f'{var_name} must be a list' raise ValueError(lowercase__ ) else: for x in _object: if not isinstance(lowercase__ , lowercase__ ): lowerCAmelCase__ = f'{var_name} must be a list of strings' raise ValueError(lowercase__ ) def lowerCAmelCase_ (lowercase__ : Any , lowercase__ : Any , lowercase__ : Any , ) -> None: '''simple docstring''' _validate_dict(lowercase__ , '''initial_probabilities''' , lowercase__ ) _validate_nested_dict(lowercase__ , '''transition_probabilities''' 
) _validate_nested_dict(lowercase__ , '''emission_probabilities''' ) def lowerCAmelCase_ (lowercase__ : Any , lowercase__ : str ) -> None: '''simple docstring''' _validate_dict(_object , lowercase__ , lowercase__ ) for x in _object.values(): _validate_dict(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) def lowerCAmelCase_ (lowercase__ : Any , lowercase__ : str , lowercase__ : type , lowercase__ : bool = False ) -> None: '''simple docstring''' if not isinstance(_object , lowercase__ ): lowerCAmelCase__ = f'{var_name} must be a dict' raise ValueError(lowercase__ ) if not all(isinstance(lowercase__ , lowercase__ ) for x in _object ): lowerCAmelCase__ = f'{var_name} all keys must be strings' raise ValueError(lowercase__ ) if not all(isinstance(lowercase__ , lowercase__ ) for x in _object.values() ): lowerCAmelCase__ = '''nested dictionary ''' if nested else '''''' lowerCAmelCase__ = f'{var_name} {nested_text}all values must be {value_type.__name__}' raise ValueError(lowercase__ ) if __name__ == "__main__": from doctest import testmod testmod()
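The name mangling above (every parameter rendered as `lowercase__`) hides the signatures, but the first function is a standard Viterbi decoder over a discrete HMM. A compact, self-contained version with the classic healthy/fever example (all names here are chosen for illustration, not taken from the file):

def viterbi(observations, states, start_p, trans_p, emit_p):
    # probs[(s, t)] = best probability of any state path ending in s at step t
    probs, pointers = {}, {}
    for s in states:
        probs[(s, 0)] = start_p[s] * emit_p[s][observations[0]]
        pointers[(s, 0)] = None
    for t in range(1, len(observations)):
        for s in states:
            best_prev = max(states, key=lambda k: probs[(k, t - 1)] * trans_p[k][s])
            probs[(s, t)] = probs[(best_prev, t - 1)] * trans_p[best_prev][s] * emit_p[s][observations[t]]
            pointers[(s, t)] = best_prev
    last = max(states, key=lambda s: probs[(s, len(observations) - 1)])
    path = [last]
    for t in range(len(observations) - 1, 0, -1):
        path.append(pointers[(path[-1], t)])
    return path[::-1]

obs = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
start = {"Healthy": 0.6, "Fever": 0.4}
trans = {"Healthy": {"Healthy": 0.7, "Fever": 0.3}, "Fever": {"Healthy": 0.4, "Fever": 0.6}}
emit = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
print(viterbi(obs, states, start, trans, emit))  # ['Healthy', 'Healthy', 'Fever']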
668
1
'''simple docstring''' import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ): '''simple docstring''' a_ =LxmertConfig.from_json_file(lowercase__ ) print(F"""Building PyTorch model from configuration: {config}""" ) a_ =LxmertForPreTraining(lowercase__ ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(lowercase__ , lowercase__ , lowercase__ ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , lowercase__ ) if __name__ == "__main__": lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowercase = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
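The `__main__` block shows the intended CLI; calling the converter directly is equivalent. A hypothetical invocation (paths are placeholders, and the mangled `def` name is assumed to be the `convert_tf_checkpoint_to_pytorch` referenced at the bottom):

# equivalent to:
#   python convert_lxmert_checkpoint.py --tf_checkpoint_path model.ckpt \
#       --config_file lxmert_config.json --pytorch_dump_path out/pytorch_model.bin
convert_tf_checkpoint_to_pytorch("model.ckpt", "lxmert_config.json", "out/pytorch_model.bin")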
41
'''simple docstring''' import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowercase = logging.get_logger(__name__) lowercase = { '''b0''': efficientnet.EfficientNetBa, '''b1''': efficientnet.EfficientNetBa, '''b2''': efficientnet.EfficientNetBa, '''b3''': efficientnet.EfficientNetBa, '''b4''': efficientnet.EfficientNetBa, '''b5''': efficientnet.EfficientNetBa, '''b6''': efficientnet.EfficientNetBa, '''b7''': efficientnet.EfficientNetBa, } lowercase = { '''b0''': { '''hidden_dim''': 1_280, '''width_coef''': 1.0, '''depth_coef''': 1.0, '''image_size''': 224, '''dropout_rate''': 0.2, '''dw_padding''': [], }, '''b1''': { '''hidden_dim''': 1_280, '''width_coef''': 1.0, '''depth_coef''': 1.1, '''image_size''': 240, '''dropout_rate''': 0.2, '''dw_padding''': [16], }, '''b2''': { '''hidden_dim''': 1_408, '''width_coef''': 1.1, '''depth_coef''': 1.2, '''image_size''': 260, '''dropout_rate''': 0.3, '''dw_padding''': [5, 8, 16], }, '''b3''': { '''hidden_dim''': 1_536, '''width_coef''': 1.2, '''depth_coef''': 1.4, '''image_size''': 300, '''dropout_rate''': 0.3, '''dw_padding''': [5, 18], }, '''b4''': { '''hidden_dim''': 1_792, '''width_coef''': 1.4, '''depth_coef''': 1.8, '''image_size''': 380, '''dropout_rate''': 0.4, '''dw_padding''': [6], }, '''b5''': { '''hidden_dim''': 2_048, '''width_coef''': 1.6, '''depth_coef''': 2.2, '''image_size''': 456, '''dropout_rate''': 0.4, '''dw_padding''': [13, 27], }, '''b6''': { '''hidden_dim''': 2_304, '''width_coef''': 1.8, '''depth_coef''': 2.6, '''image_size''': 528, '''dropout_rate''': 0.5, '''dw_padding''': [31], }, '''b7''': { '''hidden_dim''': 2_560, '''width_coef''': 2.0, '''depth_coef''': 3.1, '''image_size''': 600, '''dropout_rate''': 0.5, '''dw_padding''': [18], }, } def UpperCAmelCase_ ( lowercase__ ): '''simple docstring''' a_ =EfficientNetConfig() a_ =CONFIG_MAP[model_name]["hidden_dim"] a_ =CONFIG_MAP[model_name]["width_coef"] a_ =CONFIG_MAP[model_name]["depth_coef"] a_ =CONFIG_MAP[model_name]["image_size"] a_ =CONFIG_MAP[model_name]["dropout_rate"] a_ =CONFIG_MAP[model_name]["dw_padding"] a_ ="huggingface/label-files" a_ ="imagenet-1k-id2label.json" a_ =1_0_0_0 a_ =json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="dataset" ) , "r" ) ) a_ ={int(lowercase__ ): v for k, v in idalabel.items()} a_ =idalabel a_ ={v: k for k, v in idalabel.items()} return config def UpperCAmelCase_ ( ): '''simple docstring''' a_ ="http://images.cocodataset.org/val2017/000000039769.jpg" a_ =Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ) return im def UpperCAmelCase_ ( lowercase__ ): '''simple docstring''' a_ =CONFIG_MAP[model_name]["image_size"] a_ =EfficientNetImageProcessor( size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase__ , ) return preprocessor def UpperCAmelCase_ ( lowercase__ ): '''simple docstring''' a_ =[v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )] a_ =sorted(set(lowercase__ ) ) a_ =len(lowercase__ ) a_ ={b: str(lowercase__ ) for b, i in zip(lowercase__ , range(lowercase__ ) )} a_ 
=[] rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") ) rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") ) rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") ) rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") ) rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") ) for b in block_names: a_ =block_name_mapping[b] rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") ) rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") ) rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") ) rename_keys.append( (F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") ) rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") ) rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") ) rename_keys.append( (F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") ) rename_keys.append( (F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") ) rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") ) rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") ) rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") ) rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") ) rename_keys.append( (F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") ) rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") ) rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") ) rename_keys.append( (F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") ) rename_keys.append( (F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") ) rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") ) rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") ) rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") ) rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") ) rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") ) a_ ={} for item in rename_keys: if item[0] in original_param_names: a_ ="efficientnet." 
+ item[1] a_ ="classifier.weight" a_ ="classifier.bias" return key_mapping def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ): '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue a_ =key_mapping[key] if "_conv" in key and "kernel" in key: a_ =torch.from_numpy(lowercase__ ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: a_ =torch.from_numpy(lowercase__ ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: a_ =torch.from_numpy(np.transpose(lowercase__ ) ) else: a_ =torch.from_numpy(lowercase__ ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowercase__ ) @torch.no_grad() def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ): '''simple docstring''' a_ =model_classes[model_name]( include_top=lowercase__ , weights="imagenet" , input_tensor=lowercase__ , input_shape=lowercase__ , pooling=lowercase__ , classes=1_0_0_0 , classifier_activation="softmax" , ) a_ =original_model.trainable_variables a_ =original_model.non_trainable_variables a_ ={param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: a_ =param.numpy() a_ =list(tf_params.keys() ) # Load HuggingFace model a_ =get_efficientnet_config(lowercase__ ) a_ =EfficientNetForImageClassification(lowercase__ ).eval() a_ =hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("Converting parameters..." ) a_ =rename_keys(lowercase__ ) replace_params(lowercase__ , lowercase__ , lowercase__ ) # Initialize preprocessor and preprocess input image a_ =convert_image_processor(lowercase__ ) a_ =preprocessor(images=prepare_img() , return_tensors="pt" ) # HF model inference hf_model.eval() with torch.no_grad(): a_ =hf_model(**lowercase__ ) a_ =outputs.logits.detach().numpy() # Original model inference a_ =False a_ =CONFIG_MAP[model_name]["image_size"] a_ =prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) a_ =image.img_to_array(lowercase__ ) a_ =np.expand_dims(lowercase__ , axis=0 ) a_ =original_model.predict(lowercase__ ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowercase__ , lowercase__ , atol=1E-3 ), "The predicted logits are not the same." print("Model outputs match!" 
) if save_model: # Create folder to save model if not os.path.isdir(lowercase__ ): os.mkdir(lowercase__ ) # Save converted model and image processor hf_model.save_pretrained(lowercase__ ) preprocessor.save_pretrained(lowercase__ ) if push_to_hub: # Push model and image processor to hub print(F"""Pushing converted {model_name} to the hub...""" ) a_ =F"""efficientnet-{model_name}""" preprocessor.push_to_hub(lowercase__ ) hf_model.push_to_hub(lowercase__ ) if __name__ == "__main__": lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''b0''', type=str, help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''hf_model''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''') parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') lowercase = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
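The permutes in `replace_params` are the usual TensorFlow-to-PyTorch layout changes. A small check of what they do, independent of the script (shapes chosen arbitrarily):

import numpy as np
import torch

# Standard conv kernels: TF stores (H, W, C_in, C_out); PyTorch wants (C_out, C_in, H, W).
tf_kernel = np.zeros((3, 3, 32, 64), dtype=np.float32)
pt_weight = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
print(pt_weight.shape)  # torch.Size([64, 32, 3, 3])

# Depthwise kernels: TF stores (H, W, C_in, multiplier); PyTorch wants (C_in, multiplier, H, W).
tf_dw = np.zeros((3, 3, 32, 1), dtype=np.float32)
pt_dw = torch.from_numpy(tf_dw).permute(2, 3, 0, 1)
print(pt_dw.shape)  # torch.Size([32, 1, 3, 3])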
41
1
import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __A ( unittest.TestCase ): def A__ ( self :Tuple ): '''simple docstring''' super().tearDown() gc.collect() def A__ ( self :Union[str, Any] ): '''simple docstring''' __magic_name__ , __magic_name__ : Union[str, Any] =FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-canny""" , from_pt=__snake_case , dtype=jnp.bfloataa ) __magic_name__ , __magic_name__ : Union[str, Any] =FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=__snake_case , from_pt=__snake_case , dtype=jnp.bfloataa ) __magic_name__ : Union[str, Any] =controlnet_params __magic_name__ : int ="""bird""" __magic_name__ : Optional[int] =jax.device_count() __magic_name__ : Optional[int] =pipe.prepare_text_inputs([prompts] * num_samples ) __magic_name__ : List[str] =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ) __magic_name__ : Union[str, Any] =pipe.prepare_image_inputs([canny_image] * num_samples ) __magic_name__ : Union[str, Any] =jax.random.PRNGKey(0 ) __magic_name__ : List[Any] =jax.random.split(__snake_case , jax.device_count() ) __magic_name__ : Dict =replicate(__snake_case ) __magic_name__ : Tuple =shard(__snake_case ) __magic_name__ : int =shard(__snake_case ) __magic_name__ : Optional[Any] =pipe( prompt_ids=__snake_case , image=__snake_case , params=__snake_case , prng_seed=__snake_case , num_inference_steps=50 , jit=__snake_case , ).images assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3) __magic_name__ : Any =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __magic_name__ : List[Any] =images[0, 2_53:2_56, 2_53:2_56, -1] __magic_name__ : Dict =jnp.asarray(jax.device_get(image_slice.flatten() ) ) __magic_name__ : Optional[int] =jnp.array( [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] ) print(f"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def A__ ( self :Optional[int] ): '''simple docstring''' __magic_name__ , __magic_name__ : Optional[int] =FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-openpose""" , from_pt=__snake_case , dtype=jnp.bfloataa ) __magic_name__ , __magic_name__ : Tuple =FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=__snake_case , from_pt=__snake_case , dtype=jnp.bfloataa ) __magic_name__ : List[Any] =controlnet_params __magic_name__ : Optional[int] ="""Chef in the kitchen""" __magic_name__ : Dict =jax.device_count() __magic_name__ : Optional[int] =pipe.prepare_text_inputs([prompts] * num_samples ) __magic_name__ : Union[str, Any] =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" ) __magic_name__ : Optional[Any] =pipe.prepare_image_inputs([pose_image] * num_samples ) __magic_name__ : Any =jax.random.PRNGKey(0 ) __magic_name__ : Optional[Any] =jax.random.split(__snake_case , jax.device_count() ) __magic_name__ : str =replicate(__snake_case ) __magic_name__ : Optional[int] =shard(__snake_case ) 
__magic_name__ : List[str] =shard(__snake_case ) __magic_name__ : List[str] =pipe( prompt_ids=__snake_case , image=__snake_case , params=__snake_case , prng_seed=__snake_case , num_inference_steps=50 , jit=__snake_case , ).images assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3) __magic_name__ : Union[str, Any] =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __magic_name__ : Dict =images[0, 2_53:2_56, 2_53:2_56, -1] __magic_name__ : Union[str, Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) ) __magic_name__ : str =jnp.array( [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] ) print(f"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
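Both tests follow the same Flax data-parallel recipe: `replicate` copies the params to every device, `shard` splits the batch along the leading axis, and `jit=True` runs the pmapped call. A toy illustration of the reshapes involved (on CPU, `jax.device_count()` is typically 1):

import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

n = jax.device_count()
batch = jnp.arange(8 * n).reshape(8 * n, 1)  # leading dim must divide evenly by n
sharded = shard(batch)
print(sharded.shape)  # (n, 8, 1): one slice per device, fed to a pmapped function

params = {"w": jnp.ones((2, 2))}
replicated = replicate(params)
print(replicated["w"].shape)  # (n, 2, 2): same weights on every device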
21
import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ : Any = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = { "nvidia/segformer-b0-finetuned-ade-512-512": ( "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json" ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class __A ( UpperCamelCase__ ): UpperCamelCase = """segformer""" def __init__( self :List[str] , __snake_case :str=3 , __snake_case :Optional[Any]=4 , __snake_case :List[Any]=[2, 2, 2, 2] , __snake_case :Dict=[8, 4, 2, 1] , __snake_case :Optional[int]=[32, 64, 1_60, 2_56] , __snake_case :Union[str, Any]=[7, 3, 3, 3] , __snake_case :Optional[Any]=[4, 2, 2, 2] , __snake_case :Tuple=[1, 2, 5, 8] , __snake_case :List[Any]=[4, 4, 4, 4] , __snake_case :Optional[Any]="gelu" , __snake_case :Tuple=0.0 , __snake_case :Dict=0.0 , __snake_case :Optional[int]=0.1 , __snake_case :Optional[int]=0.02 , __snake_case :Tuple=0.1 , __snake_case :Union[str, Any]=1E-6 , __snake_case :int=2_56 , __snake_case :Optional[int]=2_55 , **__snake_case :Dict , ): '''simple docstring''' super().__init__(**__snake_case ) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be""" """ removed, as the behaviour will default to that of reshape_last_stage = True.""" , __snake_case , ) __magic_name__ : Dict =num_channels __magic_name__ : str =num_encoder_blocks __magic_name__ : List[Any] =depths __magic_name__ : Optional[Any] =sr_ratios __magic_name__ : List[str] =hidden_sizes __magic_name__ : List[str] =patch_sizes __magic_name__ : Any =strides __magic_name__ : Optional[Any] =mlp_ratios __magic_name__ : str =num_attention_heads __magic_name__ : int =hidden_act __magic_name__ : List[Any] =hidden_dropout_prob __magic_name__ : Optional[Any] =attention_probs_dropout_prob __magic_name__ : Optional[Any] =classifier_dropout_prob __magic_name__ : List[str] =initializer_range __magic_name__ : List[str] =drop_path_rate __magic_name__ : List[Any] =layer_norm_eps __magic_name__ : List[str] =decoder_hidden_size __magic_name__ : Union[str, Any] =kwargs.get("""reshape_last_stage""" , __snake_case ) __magic_name__ : Dict =semantic_loss_ignore_index class __A ( UpperCamelCase__ ): UpperCamelCase = version.parse("""1.11""" ) @property def A__ ( self :List[str] ): '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def A__ ( self :Any ): '''simple docstring''' return 1E-4 @property def A__ ( self :int ): '''simple docstring''' return 12
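The defaults above describe the MiT-b0 backbone of SegFormer. Instantiating it through the public class (real name `SegformerConfig`), with `num_labels=150` as used for ADE20k:

from transformers import SegformerConfig, SegformerForSemanticSegmentation

# MiT-b0 layout, matching the defaults above.
config = SegformerConfig(
    depths=[2, 2, 2, 2],
    hidden_sizes=[32, 64, 160, 256],
    decoder_hidden_size=256,
    num_labels=150,
)
model = SegformerForSemanticSegmentation(config)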
21
1
'''simple docstring''' import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class __lowercase ( _lowercase ): def UpperCAmelCase__ (self ): lowerCamelCase_ : Any = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A , '''hidden_sizes''' ) ) self.parent.assertTrue(hasattr(A , '''num_attention_heads''' ) ) self.parent.assertTrue(hasattr(A , '''num_encoder_blocks''' ) ) class __lowercase : def __init__(self , A , A=1_3 , A=6_4 , A=3 , A=4 , A=[2, 2, 2, 2] , A=[8, 4, 2, 1] , A=[1_6, 3_2, 6_4, 1_2_8] , A=[1, 4, 8, 1_6] , A=[1, 2, 4, 8] , A=True , A=True , A="gelu" , A=0.1 , A=0.1 , A=0.02 , A=3 , A=None , ): lowerCamelCase_ : Any = parent lowerCamelCase_ : Optional[int] = batch_size lowerCamelCase_ : str = image_size lowerCamelCase_ : int = num_channels lowerCamelCase_ : Tuple = num_encoder_blocks lowerCamelCase_ : int = sr_ratios lowerCamelCase_ : List[str] = depths lowerCamelCase_ : Union[str, Any] = hidden_sizes lowerCamelCase_ : List[str] = downsampling_rates lowerCamelCase_ : Any = num_attention_heads lowerCamelCase_ : Union[str, Any] = is_training lowerCamelCase_ : Dict = use_labels lowerCamelCase_ : Optional[Any] = hidden_act lowerCamelCase_ : Tuple = hidden_dropout_prob lowerCamelCase_ : List[Any] = attention_probs_dropout_prob lowerCamelCase_ : int = initializer_range lowerCamelCase_ : Tuple = num_labels lowerCamelCase_ : str = scope def UpperCAmelCase__ (self ): lowerCamelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ : Optional[Any] = None if self.use_labels: lowerCamelCase_ : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCamelCase_ : List[str] = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ (self ): return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def UpperCAmelCase__ (self , A , A , A ): lowerCamelCase_ : Tuple = SegformerModel(config=A ) model.to(A ) model.eval() lowerCamelCase_ : str = model(A ) lowerCamelCase_ : Optional[Any] = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def UpperCAmelCase__ (self , A , A , A ): lowerCamelCase_ : Union[str, Any] = self.num_labels lowerCamelCase_ : Tuple = SegformerForSemanticSegmentation(A ) model.to(A ) model.eval() lowerCamelCase_ : str = model(A ) 
self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) lowerCamelCase_ : Optional[int] = model(A , labels=A ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss , 0.0 ) def UpperCAmelCase__ (self , A , A , A ): lowerCamelCase_ : int = 1 lowerCamelCase_ : str = SegformerForSemanticSegmentation(config=A ) model.to(A ) model.eval() lowerCamelCase_ : List[str] = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(A ) lowerCamelCase_ : int = model(A , labels=A ) self.parent.assertGreater(result.loss , 0.0 ) def UpperCAmelCase__ (self ): lowerCamelCase_ : Union[str, Any] = self.prepare_config_and_inputs() lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : int = config_and_inputs lowerCamelCase_ : List[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowercase ( _lowercase , _lowercase , unittest.TestCase ): lowerCamelCase : Tuple = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) lowerCamelCase : Union[str, Any] = ( { "feature-extraction": SegformerModel, "image-classification": SegformerForImageClassification, "image-segmentation": SegformerForSemanticSegmentation, } if is_torch_available() else {} ) lowerCamelCase : Optional[Any] = True lowerCamelCase : Tuple = False lowerCamelCase : Any = False lowerCamelCase : Optional[int] = False def UpperCAmelCase__ (self ): lowerCamelCase_ : Tuple = SegformerModelTester(self ) lowerCamelCase_ : List[Any] = SegformerConfigTester(self , config_class=A ) def UpperCAmelCase__ (self ): self.config_tester.run_common_tests() def UpperCAmelCase__ (self ): lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def UpperCAmelCase__ (self ): lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*A ) def UpperCAmelCase__ (self ): lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*A ) @unittest.skip('''SegFormer does not use inputs_embeds''' ) def UpperCAmelCase__ (self ): pass @unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' ) def UpperCAmelCase__ (self ): pass def UpperCAmelCase__ (self ): lowerCamelCase_, lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ : str = model_class(A ) lowerCamelCase_ : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ : List[Any] = [*signature.parameters.keys()] lowerCamelCase_ : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , A ) def UpperCAmelCase__ (self ): lowerCamelCase_, lowerCamelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ : str = True for model_class in self.all_model_classes: lowerCamelCase_ : int = True lowerCamelCase_ : Tuple = False lowerCamelCase_ : List[str] = True lowerCamelCase_ : Optional[Any] = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): lowerCamelCase_ : Optional[int] = model(**self._prepare_for_class(A , A ) ) lowerCamelCase_ : Any 
= outputs.attentions lowerCamelCase_ : Dict = sum(self.model_tester.depths ) self.assertEqual(len(A ) , A ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCamelCase_ : Any = True lowerCamelCase_ : Dict = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): lowerCamelCase_ : Tuple = model(**self._prepare_for_class(A , A ) ) lowerCamelCase_ : Any = outputs.attentions self.assertEqual(len(A ) , A ) # verify the first attentions (first block, first layer) lowerCamelCase_ : Dict = (self.model_tester.image_size // 4) ** 2 lowerCamelCase_ : Union[str, Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) lowerCamelCase_ : int = (self.model_tester.image_size // 3_2) ** 2 lowerCamelCase_ : List[Any] = (self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) lowerCamelCase_ : Union[str, Any] = len(A ) # Check attention is always last and order is fine lowerCamelCase_ : Optional[int] = True lowerCamelCase_ : Tuple = True lowerCamelCase_ : Dict = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): lowerCamelCase_ : List[Any] = model(**self._prepare_for_class(A , A ) ) self.assertEqual(out_len + 1 , len(A ) ) lowerCamelCase_ : List[Any] = outputs.attentions self.assertEqual(len(A ) , A ) # verify the first attentions (first block, first layer) lowerCamelCase_ : List[str] = (self.model_tester.image_size // 4) ** 2 lowerCamelCase_ : Tuple = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def UpperCAmelCase__ (self ): def check_hidden_states_output(A , A , A ): lowerCamelCase_ : Dict = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): lowerCamelCase_ : int = model(**self._prepare_for_class(A , A ) ) lowerCamelCase_ : int = outputs.hidden_states lowerCamelCase_ : Optional[int] = self.model_tester.num_encoder_blocks self.assertEqual(len(A ) , A ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) lowerCamelCase_, lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ : Any = True check_hidden_states_output(A , A , A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ : Tuple = True check_hidden_states_output(A , A , A ) def UpperCAmelCase__ (self ): if not self.model_tester.is_training: return lowerCamelCase_, lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ : Optional[int] = True for model_class in self.all_model_classes: if model_class in get_values(A ): continue lowerCamelCase_ : int = model_class(A ) model.to(A ) model.train() lowerCamelCase_ : Dict = self._prepare_for_class(A , A , return_labels=A ) lowerCamelCase_ : str = model(**A ).loss loss.backward() @unittest.skip('''Will 
be fixed soon by reducing the size of the model used for common tests.''' ) def UpperCAmelCase__ (self ): pass @slow def UpperCAmelCase__ (self ): for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ : Tuple = SegformerModel.from_pretrained(A ) self.assertIsNotNone(A ) def lowercase_ ( ) -> Optional[int]: '''simple docstring''' lowerCamelCase_ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch class __lowercase ( unittest.TestCase ): @slow def UpperCAmelCase__ (self ): # only resize + normalize lowerCamelCase_ : Dict = SegformerImageProcessor( image_scale=(5_1_2, 5_1_2) , keep_ratio=A , align=A , do_random_crop=A ) lowerCamelCase_ : Optional[int] = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to( A ) lowerCamelCase_ : Optional[Any] = prepare_img() lowerCamelCase_ : Any = image_processor(images=A , return_tensors='''pt''' ) lowerCamelCase_ : List[str] = encoded_inputs.pixel_values.to(A ) with torch.no_grad(): lowerCamelCase_ : Any = model(A ) lowerCamelCase_ : List[str] = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) ) self.assertEqual(outputs.logits.shape , A ) lowerCamelCase_ : List[Any] = torch.tensor( [ [[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]], [[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]], [[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]], ] ).to(A ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , A , atol=1E-4 ) ) @slow def UpperCAmelCase__ (self ): # only resize + normalize lowerCamelCase_ : Dict = SegformerImageProcessor( image_scale=(5_1_2, 5_1_2) , keep_ratio=A , align=A , do_random_crop=A ) lowerCamelCase_ : List[Any] = SegformerForSemanticSegmentation.from_pretrained( '''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(A ) lowerCamelCase_ : Optional[int] = prepare_img() lowerCamelCase_ : Union[str, Any] = image_processor(images=A , return_tensors='''pt''' ) lowerCamelCase_ : Optional[int] = encoded_inputs.pixel_values.to(A ) with torch.no_grad(): lowerCamelCase_ : Dict = model(A ) lowerCamelCase_ : List[str] = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) ) self.assertEqual(outputs.logits.shape , A ) lowerCamelCase_ : Optional[int] = torch.tensor( [ [[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]], [[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]], [[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]], ] ).to(A ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , A , atol=1E-1 ) ) @slow def UpperCAmelCase__ (self ): # only resize + normalize lowerCamelCase_ : Optional[int] = SegformerImageProcessor( image_scale=(5_1_2, 5_1_2) , keep_ratio=A , align=A , do_random_crop=A ) lowerCamelCase_ : List[Any] = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to( A ) lowerCamelCase_ : List[Any] = prepare_img() lowerCamelCase_ : Optional[int] = image_processor(images=A , return_tensors='''pt''' ) lowerCamelCase_ : Tuple = encoded_inputs.pixel_values.to(A ) with torch.no_grad(): lowerCamelCase_ : Union[str, Any] = model(A ) lowerCamelCase_ : List[str] = outputs.logits.detach().cpu() lowerCamelCase_ : Dict = 
image_processor.post_process_semantic_segmentation(outputs=A , target_sizes=[(5_0_0, 3_0_0)] ) lowerCamelCase_ : Optional[int] = torch.Size((5_0_0, 3_0_0) ) self.assertEqual(segmentation[0].shape , A ) lowerCamelCase_ : str = image_processor.post_process_semantic_segmentation(outputs=A ) lowerCamelCase_ : int = torch.Size((1_2_8, 1_2_8) ) self.assertEqual(segmentation[0].shape , A )
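The integration tests above rely on two facts: the logits come out at 1/4 of the input resolution (512 -> 128), and `post_process_semantic_segmentation` upsamples them back. A usage sketch with the public checkpoint and the same COCO fixture:

import torch
from PIL import Image

from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

print(outputs.logits.shape)  # (1, 150, 128, 128): num_labels at 1/4 input resolution
seg = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(seg.shape)  # back at the original (height, width)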
357
'''simple docstring'''

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0  # the exact zeroed region was lost in extraction; this is a plausible reconstruction

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0  # reconstructed region: the original subscript was lost in extraction

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
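Outside the test harness, the slow test above reduces to a two-stage prior/decoder flow. A minimal sketch follows, assuming the same public checkpoints; the local image path and the mask convention (zeros marking the region to repaint) are illustrative assumptions, not taken from the source.

# Minimal sketch of the two-stage Kandinsky 2.2 inpainting flow exercised above.
# Checkpoint names come from the test; "cat.png" and the mask region are placeholders.
import numpy as np
import torch
from diffusers import KandinskyV22InpaintPipeline, KandinskyV22PriorPipeline
from diffusers.utils import load_image

prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
decoder = KandinskyV22InpaintPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
).to("cuda")

init_image = load_image("cat.png").resize((768, 768))  # placeholder input image
mask = np.ones((768, 768), dtype=np.float32)
mask[:250, 250:-250] = 0  # assumed convention: zeros mark the region to repaint

# Stage 1: prompt -> image embeddings; stage 2: embeddings + image + mask -> result.
image_emb, negative_emb = prior("a hat", num_inference_steps=25).to_tuple()
image = decoder(
    image=init_image,
    mask_image=mask,
    image_embeds=image_emb,
    negative_image_embeds=negative_emb,
    height=768,
    width=768,
).images[0]
image.save("cat_with_hat.png")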
357
1
"""simple docstring""" import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class snake_case_ ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=9_9 , lowerCamelCase_=3_2 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=3_7 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=1_6 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=4 , ) -> List[Any]: UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_attention_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_choices def UpperCAmelCase__ ( self) -> List[str]: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) UpperCamelCase = None if self.use_attention_mask: UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length]) UpperCamelCase = None if self.use_token_type_ids: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) UpperCamelCase = RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def UpperCAmelCase__ ( self) -> Any: UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def UpperCAmelCase__ ( self) -> str: UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = True UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class 
snake_case_ ( lowerCamelCase_ , unittest.TestCase ): """simple docstring""" A_ = True A_ = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def UpperCAmelCase__ ( self) -> Optional[int]: UpperCamelCase = FlaxRobertaModelTester(self) @slow def UpperCAmelCase__ ( self) -> Dict: for model_class_name in self.all_model_classes: UpperCamelCase = model_class_name.from_pretrained('''roberta-base''' , from_pt=lowerCamelCase_) UpperCamelCase = model(np.ones((1, 1))) self.assertIsNotNone(lowerCamelCase_)
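The slow test above boils down to one cross-framework loading pattern: PyTorch "roberta-base" weights converted on the fly into a Flax model. A minimal standalone sketch; the printed shape assumes roberta-base's hidden size of 768.

# Load PyTorch weights into a Flax model and run a trivial forward pass.
import numpy as np
from transformers import FlaxRobertaModel

model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)
outputs = model(np.ones((1, 1), dtype="i4"))  # a single dummy token id
print(outputs.last_hidden_state.shape)  # (1, 1, 768) for roberta-base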
34
import torch

from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
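All three full-loop tests above share one driving pattern against the public scheduler API: set_timesteps, then scale_model_input before the model call, then step. A runnable sketch with a trivial stand-in denoiser in place of a UNet:

# Isolated KDPM2 denoising loop; fake_denoiser is a stand-in noise predictor.
import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)


def fake_denoiser(x, t):
    # stand-in for a UNet's noise prediction
    return torch.zeros_like(x)


sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)  # sigma-dependent rescaling
    noise_pred = fake_denoiser(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample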
311
0
'''simple docstring'''

from __future__ import annotations


def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: int) -> float:
    '''simple docstring'''
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: int,
) -> float:
    '''simple docstring'''
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: int,
) -> float:
    '''simple docstring'''
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
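A short worked usage of the three helpers, using the restored names above; the expected values follow directly from the formulas (each function returns the interest alone, not principal plus interest).

# Worked examples: interest earned, computed straight from the formulas above.
print(simple_interest(10000, 0.0005, 60))  # 10000 * 0.0005 * 60 = 300.0
print(compound_interest(10000, 0.05, 3))   # 10000 * (1.05**3 - 1) = 1576.25
print(apr_interest(10000, 0.05, 1))        # daily compounding of a 5% APR for one year, about 512.67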
593
'''simple docstring'''

import logging
import os

import quant_trainer
import torch
from torch.utils.data import DataLoader

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput


logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
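The dynamic_axes idiom in save_onnx is what makes the exported QDQ graph accept variable batch sizes and sequence lengths. A self-contained toy export showing the same symbolic-axis pattern; the Toy module and file name are illustrative, not part of the source.

# Toy export: axis 0 (batch) and axis 1 (sequence) are marked symbolic.
import torch


class Toy(torch.nn.Module):
    def forward(self, input_ids):
        # reduce over the sequence axis, leaving a (batch,) tensor
        return input_ids.float().mean(dim=-1)


axes = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
    Toy(),
    (torch.ones(2, 8, dtype=torch.long),),
    "toy.onnx",
    input_names=["input_ids"],
    output_names=["mean"],
    dynamic_axes={"input_ids": axes, "mean": {0: "batch_size"}},
    opset_version=13,
)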
593
1