Dataset schema:

| column                  | type             | min | max   |
|-------------------------|------------------|-----|-------|
| code                    | string (lengths) | 86  | 54.5k |
| code_codestyle          | int64            | 0   | 371   |
| style_context           | string (lengths) | 87  | 49.2k |
| style_context_codestyle | int64            | 0   | 349   |
| label                   | int64            | 0   | 1     |
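A minimal sketch of how records with this schema could be inspected with the `datasets` library; the repo id below is a hypothetical placeholder, since this dump does not name the dataset:

```python
from datasets import load_dataset

# Hypothetical repo id: the dump does not name the dataset, so substitute the real one.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the code sample
```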

---
code:
```python
"""simple docstring"""


def sum_of_divisors(input_num):
    # Function name chosen descriptively; the original identifier was obfuscated.
    # The parameter name is recovered from the function body.
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
code_codestyle: 173

style_context:
```python
"""simple docstring"""


def find_minimum_change(denominations, value):
    total_value = int(value)
    # Initialize result
    answer = []
    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer


# Driver code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if input("Do you want to enter your denominations ? (yY/n): ").strip().lower() == "y":
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
```
style_context_codestyle: 173
label: 1

---
code:
```python
import subprocess
import sys

from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch


class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        """simple docstring"""
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        """simple docstring"""
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        """simple docstring"""
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        """simple docstring"""
        load = """
from transformers import pipeline
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
"""
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        """simple docstring"""
        load = """
from transformers import AutoModel
"""
        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
```
code_codestyle: 353
style_context:
```python
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        """simple docstring"""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """simple docstring"""
        return self.d_model
```
style_context_codestyle: 288
label: 0

---
code:
```python
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    """simple docstring"""
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int):
    """simple docstring"""
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    """simple docstring"""
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    """simple docstring"""
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
```
code_codestyle: 107
style_context:
```python
"""simple docstring"""
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """simple docstring"""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    """simple docstring"""

    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
    ".aiff", ".au", ".avr", ".caf", ".flac", ".htk", ".svx", ".mat4", ".mat5",
    ".mpc2k", ".ogg", ".paf", ".pvf", ".raw", ".rf64", ".sd2", ".sds", ".ircam",
    ".voc", ".w64", ".wav", ".nist", ".wavex", ".wve", ".xi", ".mp3", ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
```
style_context_codestyle: 211
label: 0

---
code:
```python
import unittest

from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)
        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                ]
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ],
        )

    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ],
        )
```
code_codestyle: 366
style_context:
```python
import json
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from transformers import OneFormerImageProcessor
    from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
    from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput

if is_vision_available():
    from PIL import Image


def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    """simple docstring"""
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata


class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )


@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs

    def test_init_without_params(self):
        pass

    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")

    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        # NOTE: the subscripts below are an assumption; the obfuscated sample reduced
        # these three statements to bare `= 1` assignments, and the slices used here
        # are chosen to be consistent with the asserted run-length encoding.
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)

    def test_post_process_semantic_segmentation(self):
        feature_extractor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()

        segmentation = feature_extractor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = feature_extractor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])

    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
```
style_context_codestyle: 265
label: 0

---
code:
```python
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
```
code_codestyle: 188
style_context:
```python
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
```
style_context_codestyle: 188
label: 1

---
code:
```python
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional

from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
    Benchmark,
    Memory,
    MemorySummary,
    measure_peak_memory_cpu,
    start_memory_tracing,
    stop_memory_tracing,
)


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError

    from .benchmark_args_tf import TensorFlowBenchmarkArguments

if is_py3nvml_available():
    import py3nvml.py3nvml as nvml

logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """simple docstring"""

    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    """simple docstring"""
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)


class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self) -> str:
        return tf.__version__

    def _inference_speed(self, model_name, batch_size, sequence_length) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name, batch_size, sequence_length) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name, batch_size, sequence_length) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name, batch_size, sequence_length) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name, batch_size, sequence_length) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference

    def _prepare_train_func(self, model_name, batch_size, sequence_length) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train

    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat,
                # min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
```
code_codestyle: 142
style_context:
```python
"""simple docstring"""


def twos_complement(number: int) -> str:
    """simple docstring"""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        ("1" + "0" * (binary_number_length - len(twos_complement_number)) + twos_complement_number)
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
style_context_codestyle: 142
label: 1
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest

import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed


logger = logging.getLogger(__name__)


def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b


class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))

    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
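# --- Added illustrative sketch (not part of the original test file) ---
# A minimal, hedged distillation of the round trip exercised by the tests
# above: Accelerator.save_state() snapshots model, optimizer, and RNG state
# to a folder, and Accelerator.load_state() restores it. The directory name
# below is a placeholder chosen for illustration.
import torch
from accelerate import Accelerator


def checkpoint_round_trip(save_dir: str = "/tmp/accelerate_sketch") -> None:
    model = torch.nn.Linear(1, 1)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    accelerator = Accelerator()
    model, optimizer = accelerator.prepare(model, optimizer)
    accelerator.save_state(save_dir)  # snapshot the full training state
    accelerator.load_state(save_dir)  # restore the exact same state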
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            "input_ids": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
            "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
        }  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
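# --- Added illustrative sketch (not part of the original test file) ---
# A hedged standalone version of the slow/fast parity check exercised above.
# It assumes network access to download the public facebook/xglm-564M files.
from transformers import XGLMTokenizer, XGLMTokenizerFast

slow_tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
fast_tokenizer = XGLMTokenizerFast.from_pretrained("facebook/xglm-564M")
text = "I was born in 92000, and this is falsé."
assert slow_tokenizer.encode(text) == fast_tokenizer.encode(text)  # same ids from both backends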
from ..utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )

try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
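# --- Added illustrative sketch (not part of the original module) ---
# A hedged, generic version of the optional-dependency pattern used above:
# when a backend is missing, export a stand-in that fails loudly on use
# instead of failing at import time. Names here are illustrative.
import importlib.util

if importlib.util.find_spec("torch") is not None:
    from diffusers.schedulers import DDPMScheduler  # real scheduler class
else:
    class DDPMScheduler:  # dummy object mirroring the pattern above
        def __init__(self, *args, **kwargs):
            raise ImportError("DDPMScheduler requires the `torch` backend to be installed.")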
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser


# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
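# --- Added illustrative invocation (not part of the original script) ---
# Hedged example of how the script above might be run; the flag names come
# from the attributes used on `args`, while the script filename and the
# values are assumptions made for illustration:
#
#   python initialize_model.py \
#       --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot \
#       --model_name my-codeparrot-init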
"""simple docstring""" import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger('transformers.models.speecht5') UpperCAmelCase__ = { 'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm', 'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection', 'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv', 'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed', } UpperCAmelCase__ = { 'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens', 'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha', } UpperCAmelCase__ = { 'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0', 'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1', 'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer', 'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha', 'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer', } UpperCAmelCase__ = { 'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out', 'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out', 'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv', 'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm', 'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv', 'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm', 'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv', 'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm', 'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv', 'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm', 'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv', 'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm', } UpperCAmelCase__ = { 'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens', } UpperCAmelCase__ = { 'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head', } UpperCAmelCase__ = { 'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj', 'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj', 'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj', 'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj', 'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm', 'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense', 'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense', 'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 
'speecht5.encoder.wrapped_encoder.layer_norm', 'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k', } UpperCAmelCase__ = { 'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj', 'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj', 'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj', 'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj', 'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm', 'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj', 'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj', 'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj', 'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj', 'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm', 'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense', 'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense', 'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm', } UpperCAmelCase__ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } UpperCAmelCase__ = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCAmelCase__ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCAmelCase__ = [] UpperCAmelCase__ = [ 'encoder.version', 'encoder.layers.*.norm_k.weight', 'encoder.layers.*.norm_k.bias', 'decoder.version', 'decoder.layers.*.norm_k.weight', 'decoder.layers.*.norm_k.bias', 'decoder.pos_emb.pe_k', 'speech_encoder_prenet.embed_positions._float_tensor', 'text_decoder_prenet.embed_positions._float_tensor', ] UpperCAmelCase__ = IGNORE_KEYS + [ 'encoder.proj', 'text_encoder_prenet.*', 'speech_decoder_prenet.*', 'speech_decoder_postnet.*', ] UpperCAmelCase__ = IGNORE_KEYS + [ 'encoder.proj', 'speech_encoder_prenet.*', 'text_decoder_prenet.*', 'text_decoder_postnet.*', ] UpperCAmelCase__ = IGNORE_KEYS + [ 'encoder.proj', 'text_encoder_prenet.*', 'text_decoder_prenet.*', 'text_decoder_postnet.*', ] def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Dict ) -> List[Any]: for attribute in key.split('''.''' ): _snake_case = getattr(__lowerCamelCase , __lowerCamelCase ) if weight_type is not None: _snake_case = getattr(__lowerCamelCase , __lowerCamelCase ).shape else: _snake_case = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": _snake_case = value elif weight_type == "weight_g": _snake_case = value elif weight_type == "weight_v": _snake_case = value elif weight_type == "bias": _snake_case = value elif weight_type == "running_mean": _snake_case = value elif weight_type == "running_var": _snake_case = value elif weight_type == "num_batches_tracked": _snake_case = value else: _snake_case = value logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' ) def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] ) -> List[str]: for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." in key: _snake_case , _snake_case = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple ) -> Optional[Any]: _snake_case = [] if task == "s2t": _snake_case = hf_model.speechta.encoder.prenet.feature_encoder _snake_case = MAPPING_S2T _snake_case = IGNORE_KEYS_S2T elif task == "t2s": _snake_case = None _snake_case = MAPPING_T2S _snake_case = IGNORE_KEYS_T2S elif task == "s2s": _snake_case = hf_model.speechta.encoder.prenet.feature_encoder _snake_case = MAPPING_S2S _snake_case = IGNORE_KEYS_S2S else: raise ValueError(f'''Unsupported task: {task}''' ) for name, value in fairseq_dict.items(): if should_ignore(__lowerCamelCase , __lowerCamelCase ): logger.info(f'''{name} was ignored''' ) continue _snake_case = False if "conv_layers" in name: load_conv_layer( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == '''group''' , ) _snake_case = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: _snake_case , _snake_case = key.split('''.*.''' ) if prefix in name and suffix in name: _snake_case = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: _snake_case = True if "*" in mapped_key: _snake_case = name.split(__lowerCamelCase )[0].split('''.''' )[-2] _snake_case = mapped_key.replace('''*''' , __lowerCamelCase ) if "weight_g" in name: _snake_case = '''weight_g''' elif "weight_v" in name: _snake_case = '''weight_v''' elif "bias" in name: _snake_case = '''bias''' elif "weight" in name: _snake_case = '''weight''' elif "running_mean" in name: _snake_case = '''running_mean''' elif "running_var" in name: _snake_case = '''running_var''' elif "num_batches_tracked" in name: _snake_case = '''num_batches_tracked''' else: _snake_case = None set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) continue if not is_used: unused_weights.append(__lowerCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple ) -> List[Any]: _snake_case = full_name.split('''conv_layers.''' )[-1] _snake_case = name.split('''.''' ) _snake_case = int(items[0] ) _snake_case = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _snake_case = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _snake_case = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) _snake_case = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) _snake_case = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__lowerCamelCase ) @torch.no_grad() def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=None , __lowerCamelCase : Union[str, Any]=None , ) -> Dict: if config_path is not None: _snake_case = SpeechTaConfig.from_pretrained(__lowerCamelCase ) else: _snake_case = SpeechTaConfig() if task == "s2t": _snake_case = config.max_text_positions 
_snake_case = SpeechTaForSpeechToText(__lowerCamelCase ) elif task == "t2s": _snake_case = 18_76 _snake_case = 6_00 _snake_case = config.max_speech_positions _snake_case = SpeechTaForTextToSpeech(__lowerCamelCase ) elif task == "s2s": _snake_case = 18_76 _snake_case = config.max_speech_positions _snake_case = SpeechTaForSpeechToSpeech(__lowerCamelCase ) else: raise ValueError(f'''Unknown task name: {task}''' ) if vocab_path: _snake_case = SpeechTaTokenizer(__lowerCamelCase , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it _snake_case = AddedToken('''<mask>''' , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) _snake_case = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) _snake_case = SpeechTaFeatureExtractor() _snake_case = SpeechTaProcessor(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase ) processor.save_pretrained(__lowerCamelCase ) _snake_case = torch.load(__lowerCamelCase ) recursively_load_weights(fairseq_checkpoint['''model'''] , __lowerCamelCase , __lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) if repo_id: print('''Pushing to the hub...''' ) processor.push_to_hub(__lowerCamelCase ) model.push_to_hub(__lowerCamelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument( '--task', default='s2t', type=str, help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.', ) parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.' ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) UpperCAmelCase__ = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
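# --- Added illustrative invocation (not part of the original script) ---
# Hedged example invocation; the flag names are taken from the argparse
# definitions above, while the file paths are placeholders:
#
#   python convert_speecht5_checkpoint.py \
#       --task t2s \
#       --checkpoint_path /path/to/fairseq/speecht5_tts.pt \
#       --vocab_path /path/to/spm_char.model \
#       --pytorch_dump_folder_path ./speecht5-tts-converted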
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml __A = logging.get_logger(__name__) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Dict: def run_func(__UpperCamelCase ): @wraps(__UpperCamelCase ) def run_in_eager_mode(*__UpperCamelCase , **__UpperCamelCase ): return func(*__UpperCamelCase , **__UpperCamelCase ) @wraps(__UpperCamelCase ) @tf.function(experimental_compile=__UpperCamelCase ) def run_in_graph_mode(*__UpperCamelCase , **__UpperCamelCase ): return func(*__UpperCamelCase , **__UpperCamelCase ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> ["tf.Tensor"]: _lowerCAmelCase =random.Random() _lowerCAmelCase =[rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(__UpperCamelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = 42 lowerCamelCase = 42 lowerCamelCase = "TensorFlow" @property def _lowerCAmelCase ( self ) -> int: return tf.__version__ def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> float: # initialize GPU on separate process _lowerCAmelCase =self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase =self._prepare_inference_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return self._measure_speed(_inference ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> float: _lowerCAmelCase =self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase =self._prepare_train_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return self._measure_speed(_train ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> [Memory, Optional[MemorySummary]]: # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCAmelCase ) _lowerCAmelCase =self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase =self._prepare_inference_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return self._measure_memory(_inference ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> [Memory, Optional[MemorySummary]]: if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , 
__UpperCAmelCase ) _lowerCAmelCase =self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase =self._prepare_train_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return self._measure_memory(_train ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Callable[[], None]: _lowerCAmelCase =self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _lowerCAmelCase =( hasattr(__UpperCAmelCase , """architectures""" ) and isinstance(config.architectures , __UpperCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _lowerCAmelCase ="""TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _lowerCAmelCase =__import__("""transformers""" , fromlist=[model_class] ) _lowerCAmelCase =getattr(__UpperCAmelCase , __UpperCAmelCase ) _lowerCAmelCase =model_cls(__UpperCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _lowerCAmelCase =TF_MODEL_MAPPING[config.__class__](__UpperCAmelCase ) # encoder-decoder has vocab size saved differently _lowerCAmelCase =config.vocab_size if hasattr(__UpperCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _lowerCAmelCase =random_input_ids(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , training=__UpperCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(__UpperCAmelCase , training=__UpperCAmelCase ) _lowerCAmelCase =encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Callable[[], None]: _lowerCAmelCase =self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _lowerCAmelCase =( hasattr(__UpperCAmelCase , """architectures""" ) and isinstance(config.architectures , __UpperCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _lowerCAmelCase ="""TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _lowerCAmelCase =__import__("""transformers""" , fromlist=[model_class] ) _lowerCAmelCase =getattr(__UpperCAmelCase , __UpperCAmelCase ) _lowerCAmelCase =model_cls(__UpperCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. 
If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _lowerCAmelCase =TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__UpperCAmelCase ) # encoder-decoder has vocab size saved differently _lowerCAmelCase =config.vocab_size if hasattr(__UpperCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _lowerCAmelCase =random_input_ids(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): _lowerCAmelCase =model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase )[0] _lowerCAmelCase =tf.gradients(__UpperCAmelCase , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): _lowerCAmelCase =model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase )[0] _lowerCAmelCase =tf.gradients(__UpperCAmelCase , model.trainable_variables ) return gradients _lowerCAmelCase =encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def _lowerCAmelCase ( self , __UpperCAmelCase ) -> float: with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(__UpperCAmelCase , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _lowerCAmelCase =timeit.repeat( __UpperCAmelCase , repeat=self.args.repeat , number=10 , ) return min(__UpperCAmelCase ) / 1_0.0 except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> [Memory, MemorySummary]: logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _lowerCAmelCase =start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _lowerCAmelCase ="""N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _lowerCAmelCase =nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _lowerCAmelCase =nvml.nvmlDeviceGetMemoryInfo(__UpperCAmelCase ) _lowerCAmelCase =meminfo.used _lowerCAmelCase =Memory(__UpperCAmelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _lowerCAmelCase =None else: _lowerCAmelCase =measure_peak_memory_cpu(__UpperCAmelCase ) _lowerCAmelCase =Memory(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else memory_bytes if self.args.trace_memory_line_by_line: _lowerCAmelCase =stop_memory_tracing(__UpperCAmelCase ) if memory is None: _lowerCAmelCase =summary.total else: _lowerCAmelCase =None return memory, summary except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) return "N/A", None
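# --- Added illustrative sketch (not part of the original module) ---
# A hedged usage example for the benchmark class above, written against the
# public transformers API; the model name and sizes are placeholders.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

benchmark_args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32]
)
benchmark = TensorFlowBenchmark(benchmark_args)
results = benchmark.run()  # prints and returns speed/memory results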
"""simple docstring""" from __future__ import annotations def _lowerCamelCase(__UpperCamelCase ) -> bool: _lowerCAmelCase =str(__UpperCamelCase ) return n == n[::-1] def _lowerCamelCase(__UpperCamelCase = 1000000 ) -> str: _lowerCAmelCase =0 for i in range(1 , __UpperCamelCase ): if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split("""b""" )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
import json
import os
import unittest
from typing import Tuple

from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer

from ...test_tokenization_common import TokenizerTesterMixin


@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = (
            "<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
            "ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
            "ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
            "oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
            "pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
            "yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
            'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
            "ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
            "ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
            "uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
            "ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
            "ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
            "ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
        ).split(" ")
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)

    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")

    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")

    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])

    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)

    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"

        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids
        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)
        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")

    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"
        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids
        self.assertEqual(input_ids_up, input_ids_low)

    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))

        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
        )

        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
        )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
        )

    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )
            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on

        # We assume that `decode` works as expected.
All we will check now is # the output type is correct and the output is identical to `decode` # char __SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCAmelCase__ , output_char_offsets=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = [tokenizer.decode(lowerCAmelCase__ , output_char_offsets=lowerCAmelCase__ ) for ids in sample_ids] check_list_tuples_equal(lowerCAmelCase__ , lowerCAmelCase__ ) @unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' ) def __magic_name__( self :Tuple ) -> Any: pass @unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' ) def __magic_name__( self :int ) -> str: pass @unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' ) def __magic_name__( self :Any ) -> Any: pass @unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' ) def __magic_name__( self :Dict ) -> str: pass def __magic_name__( self :Optional[Any] ) -> List[Any]: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __SCREAMING_SNAKE_CASE : str = tokenizer.vocab_size __SCREAMING_SNAKE_CASE : Any = len(lowerCAmelCase__ ) self.assertNotEqual(lowerCAmelCase__ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) __SCREAMING_SNAKE_CASE : Optional[int] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] __SCREAMING_SNAKE_CASE : Dict = tokenizer.add_tokens(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = tokenizer.vocab_size __SCREAMING_SNAKE_CASE : Any = len(lowerCAmelCase__ ) self.assertNotEqual(lowerCAmelCase__ , 0 ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , len(lowerCAmelCase__ ) ) self.assertEqual(lowerCAmelCase__ , all_size + len(lowerCAmelCase__ ) ) __SCREAMING_SNAKE_CASE : str = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=lowerCAmelCase__ ) self.assertGreaterEqual(len(lowerCAmelCase__ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) __SCREAMING_SNAKE_CASE : str = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''} __SCREAMING_SNAKE_CASE : Tuple = tokenizer.add_special_tokens(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = tokenizer.vocab_size __SCREAMING_SNAKE_CASE : Dict = len(lowerCAmelCase__ ) self.assertNotEqual(lowerCAmelCase__ , 0 ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , len(lowerCAmelCase__ ) ) self.assertEqual(lowerCAmelCase__ , all_size_a + len(lowerCAmelCase__ ) ) __SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=lowerCAmelCase__ ) self.assertGreaterEqual(len(lowerCAmelCase__ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def 
__magic_name__( self :str ) -> Tuple: pass @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def __magic_name__( self :Tuple ) -> str: pass def __magic_name__( self :Dict ) -> Any: # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which # is not the case for Wav2Vec2PhonemeCTCTokenizer. __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizers(fast=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __SCREAMING_SNAKE_CASE : int = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t'''] __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_tokens_to_string(lowerCAmelCase__ ) self.assertIsInstance(output['''text'''] , lowerCAmelCase__ )
9
'''simple docstring'''

import time
from contextlib import contextmanager
from pathlib import Path

import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder


a : List[Any] = """__DUMMY_TRANSFORMERS_USER__"""
a : Tuple = """Dummy User"""
a : Optional[Any] = """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"""

a : Optional[Any] = """https://hub-ci.huggingface.co"""
a : List[Any] = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
a : Tuple = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
a : str = Path("""~/.huggingface/hub_ci_token""").expanduser()


@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
    monkeypatch.setattr(
        """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , _lowercase )


@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> List[str]:
    monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , _lowercase )
    monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , _lowercase )


@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> Any:
    monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , _lowercase )


@pytest.fixture
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]:
    HfFolder.save_token(_lowercase )
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( ) -> str:
    return HfApi(endpoint=_lowercase )


@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Union[str, Any]:
    UpperCAmelCase : str = HfFolder.get_token()
    HfFolder.save_token(_lowercase )
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(_lowercase )


@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> Any:
    def _cleanup_repo(_lowercase ):
        hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" )

    return _cleanup_repo


@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> List[str]:
    @contextmanager
    def _temporary_repo(_lowercase ):
        try:
            yield repo_id
        finally:
            cleanup_repo(_lowercase )

    return _temporary_repo


@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]:
    UpperCAmelCase : str = F'''repo_txt_data-{int(time.time() * 10e3 )}'''
    UpperCAmelCase : List[Any] = F'''{CI_HUB_USER}/{repo_name}'''
    hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase )
    hf_api.upload_file(
        token=_lowercase ,
        path_or_fileobj=str(_lowercase ) ,
        path_in_repo="""data/text_data.txt""" ,
        repo_id=_lowercase ,
        repo_type="""dataset""" ,
    )
    yield repo_id
    try:
        hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]:
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
    UpperCAmelCase : Optional[int] = F'''repo_zipped_txt_data-{int(time.time() * 10e3 )}'''
    UpperCAmelCase : Optional[int] = F'''{CI_HUB_USER}/{repo_name}'''
    hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase )
    hf_api.upload_file(
        token=_lowercase ,
        path_or_fileobj=str(_lowercase ) ,
        path_in_repo="""data.zip""" ,
        repo_id=_lowercase ,
        repo_type="""dataset""" ,
    )
    yield repo_id
    try:
        hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]:
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Tuple:
    UpperCAmelCase : List[Any] = F'''repo_zipped_img_data-{int(time.time() * 10e3 )}'''
    UpperCAmelCase : List[str] = F'''{CI_HUB_USER}/{repo_name}'''
    hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase )
    hf_api.upload_file(
        token=_lowercase ,
        path_or_fileobj=str(_lowercase ) ,
        path_in_repo="""data.zip""" ,
        repo_id=_lowercase ,
        repo_type="""dataset""" ,
    )
    yield repo_id
    try:
        hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]:
    return hf_private_dataset_repo_zipped_img_data_
265
0
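The fixtures in the preceding style_context all follow pytest's create/yield/clean-up shape for a session-scoped Hub repo. As a sanity check of that shape only, here is a minimal runnable sketch in which the resource is a temp directory instead of a Hub repo; the fixture name scratch_dir is illustrative, not from the row:

import shutil
import tempfile

import pytest


@pytest.fixture(scope="session")
def scratch_dir():
    # setup: create the resource once per test session
    path = tempfile.mkdtemp(prefix="repo_txt_data-")
    yield path  # tests receive the resource here
    # teardown: best-effort cleanup, mirroring the try/except around delete_repo above
    try:
        shutil.rmtree(path)
    except OSError:
        pass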
'''simple docstring'''

import functools


def lowerCamelCase (_SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] ):
    # Validation
    if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )

    if len(_SCREAMING_SNAKE_CASE ) != 3 or not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )

    if len(_SCREAMING_SNAKE_CASE ) == 0:
        return 0

    if min(_SCREAMING_SNAKE_CASE ) <= 0:
        raise ValueError('All days elements should be greater than 0' )

    if max(_SCREAMING_SNAKE_CASE ) >= 366:
        raise ValueError('All days elements should be less than 366' )

    __a : List[Any] = set(_SCREAMING_SNAKE_CASE )

    @functools.cache
    def dynamic_programming(_SCREAMING_SNAKE_CASE : int ) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1 )

        return min(
            costs[0] + dynamic_programming(index + 1 ) ,
            costs[1] + dynamic_programming(index + 7 ) ,
            costs[2] + dynamic_programming(index + 30 ) ,
        )

    return dynamic_programming(1 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
294
'''simple docstring'''

import shutil
import tempfile
import unittest

from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio

from .test_feature_extraction_clap import floats_list


@require_torchaudio
@require_sentencepiece
class __UpperCamelCase ( unittest.TestCase ):
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Optional[Any] = 'laion/clap-htsat-unfused'
        __a : Optional[Any] = tempfile.mkdtemp()

    def __UpperCAmelCase ( self , **__a ):
        '''simple docstring'''
        return RobertaTokenizer.from_pretrained(self.checkpoint , **__a )

    def __UpperCAmelCase ( self , **__a ):
        '''simple docstring'''
        return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Any = self.get_tokenizer()
        __a : List[str] = self.get_feature_extractor()
        __a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a )

        processor.save_pretrained(self.tmpdirname )
        __a : Tuple = ClapProcessor.from_pretrained(self.tmpdirname )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __a )

        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , __a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : str = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )

        __a : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        __a : List[str] = self.get_feature_extractor(do_normalize=__a , padding_value=1.0 )

        __a : Tuple = ClapProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __a )

        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , __a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : str = self.get_feature_extractor()
        __a : int = self.get_tokenizer()
        __a : str = ClapProcessor(tokenizer=__a , feature_extractor=__a )

        __a : int = floats_list((3, 1000) )

        __a : str = feature_extractor(__a , return_tensors='np' )
        __a : int = processor(audios=__a , return_tensors='np' )

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Union[str, Any] = self.get_feature_extractor()
        __a : Any = self.get_tokenizer()
        __a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a )

        __a : Union[str, Any] = 'This is a test string'

        __a : Union[str, Any] = processor(text=__a )
        __a : Tuple = tokenizer(__a )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : str = self.get_feature_extractor()
        __a : str = self.get_tokenizer()
        __a : List[str] = ClapProcessor(tokenizer=__a , feature_extractor=__a )

        __a : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        __a : Optional[int] = processor.batch_decode(__a )
        __a : Optional[Any] = tokenizer.batch_decode(__a )

        self.assertListEqual(__a , __a )

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        __a : Optional[Any] = self.get_feature_extractor()
        __a : Optional[int] = self.get_tokenizer()
        __a : int = ClapProcessor(tokenizer=__a , feature_extractor=__a )

        self.assertListEqual(
            processor.model_input_names[2:] ,
            feature_extractor.model_input_names ,
            msg='`processor` and `feature_extractor` model input names do not match' ,
        )
294
1
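The code field of the row just closed is the classic minimum-cost-tickets dynamic program, memoized with functools.cache, but its identifiers were scrambled (for example, both parameters became _SCREAMING_SNAKE_CASE). As a sanity check, here is a self-contained restatement of the same recursion with readable names; the name mincost_tickets and the sample input are ours, not the row's:

import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Cheapest way to cover all travel days with 1-, 7-, and 30-day passes."""
    days_set = set(days)

    @functools.cache
    def dp(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dp(index + 1)  # no travel today, buy nothing
        return min(
            costs[0] + dp(index + 1),   # 1-day pass
            costs[1] + dp(index + 7),   # 7-day pass
            costs[2] + dp(index + 30),  # 30-day pass
        )

    return dp(1)


print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11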
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_A : List[str] = {
    'configuration_groupvit': [
        'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'GroupViTConfig',
        'GroupViTOnnxConfig',
        'GroupViTTextConfig',
        'GroupViTVisionConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : Union[str, Any] = [
        'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GroupViTModel',
        'GroupViTPreTrainedModel',
        'GroupViTTextModel',
        'GroupViTVisionModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : Optional[Any] = [
        'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFGroupViTModel',
        'TFGroupViTPreTrainedModel',
        'TFGroupViTTextModel',
        'TFGroupViTVisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    _A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
142
import unittest

from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class __SCREAMING_SNAKE_CASE :
        @staticmethod
        def __lowerCamelCase ( *A : Dict , **A : Optional[int] ) ->Dict:
            pass


@is_pipeline_test
@require_vision
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    _UpperCAmelCase : Optional[int] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def __lowerCamelCase ( self : Any , A : List[str] , A : Tuple , A : List[str] ) ->List[Any]:
        lowerCamelCase__ : List[str] = pipeline(
            '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
        lowerCamelCase__ : Union[str, Any] = [
            {
                '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
                '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
            }
        ]
        return object_detector, examples

    def __lowerCamelCase ( self : List[Any] , A : Optional[int] , A : Tuple ) ->Optional[Any]:
        lowerCamelCase__ : str = object_detector(examples[0] , threshold=0.0 )

        lowerCamelCase__ : Union[str, Any] = len(A )
        self.assertGreater(A , 0 )
        self.assertEqual(
            A ,
            [
                {
                    '''score''': ANY(A ),
                    '''label''': ANY(A ),
                    '''box''': {'''xmin''': ANY(A ), '''ymin''': ANY(A ), '''xmax''': ANY(A ), '''ymax''': ANY(A )},
                }
                for i in range(A )
            ] ,
        )

    @require_tf
    @unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def __lowerCamelCase ( self : Dict ) ->List[Any]:
        pass

    @require_torch
    def __lowerCamelCase ( self : Optional[Any] ) ->List[Any]:
        lowerCamelCase__ : Optional[int] = pipeline(
            '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )

        lowerCamelCase__ : List[Any] = object_detector(
            '''./tests/fixtures/tests_samples/COCO/000000039769.png''' ,
            candidate_labels=['''cat''', '''remote''', '''couch'''] ,
            threshold=0.64 ,
        )

        self.assertEqual(
            nested_simplify(A , decimals=4 ) ,
            [
                {'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                {'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                {'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                {'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                {'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                {'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                {'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
                {'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
                {'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
            ] ,
        )

        lowerCamelCase__ : str = object_detector(
            [
                {
                    '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
                    '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
                }
            ] ,
            threshold=0.64 ,
        )
        self.assertEqual(
            nested_simplify(A , decimals=4 ) ,
            [
                [
                    {'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                    {'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                    {'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
                    {'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                    {'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                    {'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
                    {'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
                    {'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
                    {'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
                ]
            ] ,
        )

    @require_torch
    @slow
    def __lowerCamelCase ( self : Union[str, Any] ) ->Optional[Any]:
        lowerCamelCase__ : Tuple = pipeline('''zero-shot-object-detection''' )

        lowerCamelCase__ : str = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' ,
            candidate_labels=['''cat''', '''remote''', '''couch'''] ,
        )
        self.assertEqual(
            nested_simplify(A , decimals=4 ) ,
            [
                {'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
                {'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
                {'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
                {'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
                {'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
            ] ,
        )

        lowerCamelCase__ : List[Any] = object_detector(
            [
                {
                    '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                    '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
                },
                {
                    '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                    '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
                },
            ] ,
        )
        self.assertEqual(
            nested_simplify(A , decimals=4 ) ,
            [
                [
                    {'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
                    {'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
                    {'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
                    {'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
                    {'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
                ],
                [
                    {'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
                    {'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
                    {'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
                    {'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
                    {'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
                ],
            ] ,
        )

    @require_tf
    @unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def __lowerCamelCase ( self : int ) ->Union[str, Any]:
        pass

    @require_torch
    @slow
    def __lowerCamelCase ( self : Optional[int] ) ->Optional[int]:
        lowerCamelCase__ : Optional[Any] = 0.2
        lowerCamelCase__ : List[Any] = pipeline('''zero-shot-object-detection''' )

        lowerCamelCase__ : Any = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' ,
            candidate_labels=['''cat''', '''remote''', '''couch'''] ,
            threshold=A ,
        )
        self.assertEqual(
            nested_simplify(A , decimals=4 ) ,
            [
                {'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
                {'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
                {'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
            ] ,
        )

    @require_torch
    @slow
    def __lowerCamelCase ( self : Any ) ->str:
        lowerCamelCase__ : List[Any] = 2
        lowerCamelCase__ : Union[str, Any] = pipeline('''zero-shot-object-detection''' )

        lowerCamelCase__ : List[str] = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' ,
            candidate_labels=['''cat''', '''remote''', '''couch'''] ,
            top_k=A ,
        )
        self.assertEqual(
            nested_simplify(A , decimals=4 ) ,
            [
                {'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
                {'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
            ] ,
        )
142
1
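Both files in the row just closed register their exports with _LazyModule, which defers the heavy torch/TF imports until an attribute is first touched. A stripped-down sketch of that mechanism, assuming the real _LazyModule works roughly this way; the class name LazyModule and its details are illustrative, not transformers' actual implementation:

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve exported symbols on first access instead of at import time."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map exported symbol -> submodule that defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, attr: str):
        module_name = self._symbol_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(f"{self.__name__}.{module_name}"), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value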
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works

from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    '''stable diffusion controlnet''',
    '''0.22.0''',
    '''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
    standard_warn=False,
    stacklevel=3,
)
125
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def __lowerCamelCase ( snake_case__ ) -> int:
    """simple docstring"""
    _SCREAMING_SNAKE_CASE = prime_factors(snake_case__ )
    if is_square_free(snake_case__ ):
        return -1 if len(snake_case__ ) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
125
1
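The style_context of the row just closed computes the Möbius function: μ(n) = 0 when n has a squared prime factor, otherwise (−1)^k for k prime factors. A self-contained sketch of the same logic with the two maths.* helpers inlined; the names prime_factors and mobius mirror the imports, and the trial-division factorizer is our stand-in:

def prime_factors(n: int) -> list[int]:
    """Prime factors of n with multiplicity, by trial division."""
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors


def mobius(n: int) -> int:
    factors = prime_factors(n)
    if len(set(factors)) == len(factors):  # square-free
        return -1 if len(factors) % 2 else 1
    return 0


print([mobius(n) for n in range(1, 11)])  # [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]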
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union

import flax
import jax.numpy as jnp

from ..utils import BaseOutput


snake_case_ : Optional[Any] = "scheduler_config.json"


class __snake_case ( a ):
    UpperCAmelCase__ : List[str] = 1
    UpperCAmelCase__ : str = 2
    UpperCAmelCase__ : Tuple = 3
    UpperCAmelCase__ : Union[str, Any] = 4
    UpperCAmelCase__ : Tuple = 5


@dataclass
class __snake_case ( a ):
    UpperCAmelCase__ : jnp.ndarray


class __snake_case :
    UpperCAmelCase__ : Tuple = SCHEDULER_CONFIG_NAME
    UpperCAmelCase__ : Tuple = ['''dtype''']
    UpperCAmelCase__ : List[Any] = []
    UpperCAmelCase__ : List[str] = True

    @classmethod
    def lowerCamelCase ( cls : int , _snake_case : Dict[str, Any] = None , _snake_case : Optional[str] = None , _snake_case : Any=False , **_snake_case : List[Any] , ):
        """simple docstring"""
        UpperCAmelCase_ , UpperCAmelCase_ = cls.load_config(
            pretrained_model_name_or_path=_snake_case ,
            subfolder=_snake_case ,
            return_unused_kwargs=_snake_case ,
            **_snake_case ,
        )
        UpperCAmelCase_ , UpperCAmelCase_ = cls.from_config(_snake_case , return_unused_kwargs=_snake_case , **_snake_case)

        if hasattr(_snake_case , '''create_state''') and getattr(_snake_case , '''has_state''' , _snake_case):
            UpperCAmelCase_ = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def lowerCamelCase ( self : Tuple , _snake_case : Union[str, os.PathLike] , _snake_case : bool = False , **_snake_case : str):
        """simple docstring"""
        self.save_config(save_directory=_snake_case , push_to_hub=_snake_case , **_snake_case)

    @property
    def lowerCamelCase ( self : Optional[Any]):
        """simple docstring"""
        return self._get_compatibles()

    @classmethod
    def lowerCamelCase ( cls : Optional[int]):
        """simple docstring"""
        UpperCAmelCase_ = list(set([cls.__name__] + cls._compatibles))
        UpperCAmelCase_ = importlib.import_module(__name__.split('''.''')[0])
        UpperCAmelCase_ = [
            getattr(_snake_case , _snake_case) for c in compatible_classes_str if hasattr(_snake_case , _snake_case)
        ]
        return compatible_classes


def A (__A : jnp.ndarray , __A : Tuple[int] ) -> jnp.ndarray:
    """simple docstring"""
    assert len(__A ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(__A ) - x.ndim) ) , __A )


def A (__A : int , __A : Tuple=0.999 , __A : int=jnp.floataa ) -> jnp.ndarray:
    """simple docstring"""

    def alpha_bar(__A : Optional[int] ):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2

    UpperCAmelCase_ = []
    for i in range(__A ):
        UpperCAmelCase_ = i / num_diffusion_timesteps
        UpperCAmelCase_ = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(__A ) / alpha_bar(__A ) , __A ) )
    return jnp.array(__A , dtype=__A )


@flax.struct.dataclass
class __snake_case :
    UpperCAmelCase__ : jnp.ndarray
    UpperCAmelCase__ : jnp.ndarray
    UpperCAmelCase__ : jnp.ndarray

    @classmethod
    def lowerCamelCase ( cls : Any , _snake_case : Dict):
        """simple docstring"""
        UpperCAmelCase_ = scheduler.config

        if config.trained_betas is not None:
            UpperCAmelCase_ = jnp.asarray(config.trained_betas , dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            UpperCAmelCase_ = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            UpperCAmelCase_ = (
                jnp.linspace(
                    config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype)
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            UpperCAmelCase_ = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                F"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""")

        UpperCAmelCase_ = 1.0 - betas
        UpperCAmelCase_ = jnp.cumprod(_snake_case , axis=0)

        return cls(
            alphas=_snake_case ,
            betas=_snake_case ,
            alphas_cumprod=_snake_case ,
        )


def A (__A : CommonSchedulerState , __A : jnp.ndarray , __A : jnp.ndarray , __A : jnp.ndarray ) -> Dict:
    """simple docstring"""
    UpperCAmelCase_ = state.alphas_cumprod

    UpperCAmelCase_ = alphas_cumprod[timesteps] ** 0.5
    UpperCAmelCase_ = sqrt_alpha_prod.flatten()
    UpperCAmelCase_ = broadcast_to_shape_from_left(__A , original_samples.shape )

    UpperCAmelCase_ = (1 - alphas_cumprod[timesteps]) ** 0.5
    UpperCAmelCase_ = sqrt_one_minus_alpha_prod.flatten()
    UpperCAmelCase_ = broadcast_to_shape_from_left(__A , original_samples.shape )

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def A (__A : CommonSchedulerState , __A : jnp.ndarray , __A : jnp.ndarray , __A : jnp.ndarray ) -> Dict:
    """simple docstring"""
    UpperCAmelCase_ , UpperCAmelCase_ = get_sqrt_alpha_prod(__A , __A , __A , __A )
    UpperCAmelCase_ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def A (__A : CommonSchedulerState , __A : jnp.ndarray , __A : jnp.ndarray , __A : jnp.ndarray ) -> Any:
    """simple docstring"""
    UpperCAmelCase_ , UpperCAmelCase_ = get_sqrt_alpha_prod(__A , __A , __A , __A )
    UpperCAmelCase_ = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
51
from collections import defaultdict
from typing import Optional

from ..image_utils import load_image
from ..utils import (
    add_end_docstrings,
    is_torch_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING

UpperCamelCase = logging.get_logger(__name__)


@add_end_docstrings(_UpperCAmelCase )
class __UpperCAmelCase (_UpperCAmelCase ):
    def __init__( self: Any , **UpperCAmelCase_: Optional[Any] ):
        '''simple docstring'''
        super().__init__(**UpperCAmelCase_ )
        requires_backends(self , """vision""" )
        requires_backends(self , """torch""" )

        if self.framework != "pt":
            raise ValueError(F'The {self.__class__} is only available in PyTorch.' )

        self.check_model_type(UpperCAmelCase_ )

    def UpperCamelCase ( self: str , **UpperCAmelCase_: Dict ):
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE = {}
        _SCREAMING_SNAKE_CASE = {}
        _SCREAMING_SNAKE_CASE = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            _SCREAMING_SNAKE_CASE = kwargs["""points_per_batch"""]
        if "points_per_crop" in kwargs:
            _SCREAMING_SNAKE_CASE = kwargs["""points_per_crop"""]
        if "crops_n_layers" in kwargs:
            _SCREAMING_SNAKE_CASE = kwargs["""crops_n_layers"""]
        if "crop_overlap_ratio" in kwargs:
            _SCREAMING_SNAKE_CASE = kwargs["""crop_overlap_ratio"""]
        if "crop_n_points_downscale_factor" in kwargs:
            _SCREAMING_SNAKE_CASE = kwargs["""crop_n_points_downscale_factor"""]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            _SCREAMING_SNAKE_CASE = kwargs["""pred_iou_thresh"""]
        if "stability_score_offset" in kwargs:
            _SCREAMING_SNAKE_CASE = kwargs["""stability_score_offset"""]
        if "mask_threshold" in kwargs:
            _SCREAMING_SNAKE_CASE = kwargs["""mask_threshold"""]
        if "stability_score_thresh" in kwargs:
            _SCREAMING_SNAKE_CASE = kwargs["""stability_score_thresh"""]
        if "crops_nms_thresh" in kwargs:
            _SCREAMING_SNAKE_CASE = kwargs["""crops_nms_thresh"""]
        if "output_rle_mask" in kwargs:
            _SCREAMING_SNAKE_CASE = kwargs["""output_rle_mask"""]
        if "output_bboxes_mask" in kwargs:
            _SCREAMING_SNAKE_CASE = kwargs["""output_bboxes_mask"""]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__( self: Optional[Any] , UpperCAmelCase_: Tuple , *UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Optional[Any]=None , UpperCAmelCase_: Tuple=None , **UpperCAmelCase_: Any ):
        '''simple docstring'''
        return super().__call__(UpperCAmelCase_ , *UpperCAmelCase_ , num_workers=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , **UpperCAmelCase_ )

    def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[str] , UpperCAmelCase_: Dict=64 , UpperCAmelCase_: int = 0 , UpperCAmelCase_: float = 512 / 1_500 , UpperCAmelCase_: Optional[int] = 32 , UpperCAmelCase_: Optional[int] = 1 , ):
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE = load_image(UpperCAmelCase_ )
        _SCREAMING_SNAKE_CASE = self.image_processor.size["""longest_edge"""]
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.generate_crop_boxes(
            UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
        _SCREAMING_SNAKE_CASE = self.image_processor(images=UpperCAmelCase_ , return_tensors="""pt""" )

        with self.device_placement():
            if self.framework == "pt":
                _SCREAMING_SNAKE_CASE = self.get_inference_context()
                with inference_context():
                    _SCREAMING_SNAKE_CASE = self._ensure_tensor_on_device(UpperCAmelCase_ , device=self.device )
                    _SCREAMING_SNAKE_CASE = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
                    _SCREAMING_SNAKE_CASE = image_embeddings

        _SCREAMING_SNAKE_CASE = grid_points.shape[1]
        _SCREAMING_SNAKE_CASE = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
                """To return all points at once, set points_per_batch to None""" )

        for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ):
            _SCREAMING_SNAKE_CASE = grid_points[:, i : i + points_per_batch, :, :]
            _SCREAMING_SNAKE_CASE = input_labels[:, i : i + points_per_batch]
            _SCREAMING_SNAKE_CASE = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def UpperCamelCase ( self: Any , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Optional[Any]=0.88 , UpperCAmelCase_: Dict=0.95 , UpperCAmelCase_: Tuple=0 , UpperCAmelCase_: str=1 , ):
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE = model_inputs.pop("""input_boxes""" )
        _SCREAMING_SNAKE_CASE = model_inputs.pop("""is_last""" )
        _SCREAMING_SNAKE_CASE = model_inputs.pop("""original_sizes""" ).tolist()
        _SCREAMING_SNAKE_CASE = model_inputs.pop("""reshaped_input_sizes""" ).tolist()

        _SCREAMING_SNAKE_CASE = self.model(**UpperCAmelCase_ )

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        _SCREAMING_SNAKE_CASE = model_outputs["""pred_masks"""]
        _SCREAMING_SNAKE_CASE = self.image_processor.post_process_masks(
            UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , binarize=UpperCAmelCase_ )
        _SCREAMING_SNAKE_CASE = model_outputs["""iou_scores"""]
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.filter_masks(
            masks[0] ,
            iou_scores[0] ,
            original_sizes[0] ,
            input_boxes[0] ,
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def UpperCamelCase ( self: Any , UpperCAmelCase_: List[Any] , UpperCAmelCase_: List[str]=False , UpperCAmelCase_: str=False , UpperCAmelCase_: Any=0.7 , ):
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE = []
        _SCREAMING_SNAKE_CASE = []
        _SCREAMING_SNAKE_CASE = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("""iou_scores""" ) )
            all_masks.extend(model_output.pop("""masks""" ) )
            all_boxes.append(model_output.pop("""boxes""" ) )

        _SCREAMING_SNAKE_CASE = torch.cat(UpperCAmelCase_ )
        _SCREAMING_SNAKE_CASE = torch.cat(UpperCAmelCase_ )
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.post_process_for_mask_generation(
            UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )

        _SCREAMING_SNAKE_CASE = defaultdict(UpperCAmelCase_ )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(UpperCAmelCase_ )

        _SCREAMING_SNAKE_CASE = {}
        if output_rle_mask:
            _SCREAMING_SNAKE_CASE = rle_mask
        if output_bboxes_mask:
            _SCREAMING_SNAKE_CASE = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
306
0
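The broadcast helper in the Flax scheduler utilities of the row just closed right-pads a per-timestep coefficient's shape with singleton axes so it broadcasts against a batch of samples. The same reshape-then-broadcast trick in a minimal numpy sketch (numpy stands in for jax.numpy so the snippet runs without JAX; the function name is ours):

import numpy as np


def broadcast_from_left(x: np.ndarray, shape: tuple) -> np.ndarray:
    # append singleton axes so x's leading dims line up with `shape`,
    # then let broadcasting expand the trailing dims
    assert len(shape) >= x.ndim
    return np.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


coeffs = np.array([0.5, 0.25])  # e.g. sqrt(alpha_cumprod) for a batch of 2 timesteps
print(broadcast_from_left(coeffs, (2, 3, 4)).shape)  # (2, 3, 4)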
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase : Optional[Any] = logging.get_logger(__name__) def snake_case (A_ :Optional[int] ): '''simple docstring''' a : Union[str, Any] = SwinConfig( embed_dim=1_9_2 , depths=(2, 2, 1_8, 2) , num_heads=(6, 1_2, 2_4, 4_8) , window_size=1_2 , out_features=['stage2', 'stage3', 'stage4'] , ) a : Tuple = DetaConfig( backbone_config=A_ , num_queries=9_0_0 , encoder_ffn_dim=2_0_4_8 , decoder_ffn_dim=2_0_4_8 , num_feature_levels=5 , assign_first_stage=A_ , with_box_refine=A_ , two_stage=A_ , ) # set labels a : Union[str, Any] = 'huggingface/label-files' if "o365" in model_name: a : Dict = 3_6_6 a : Tuple = 'object365-id2label.json' else: a : Any = 9_1 a : str = 'coco-detection-id2label.json' a : str = num_labels a : Union[str, Any] = json.load(open(cached_download(hf_hub_url(A_ , A_ , repo_type='dataset' ) ) , 'r' ) ) a : Union[str, Any] = {int(A_ ): v for k, v in idalabel.items()} a : Optional[Any] = idalabel a : List[str] = {v: k for k, v in idalabel.items()} return config def snake_case (A_ :Union[str, Any] ): '''simple docstring''' a : List[Any] = [] # stem # fmt: off rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') ) rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', 
f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') ) rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') ) rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') ) rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') ) rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') ) rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') ) # transformer encoder for i in range(config.encoder_layers ): rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') ) 
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') ) # transformer decoder for i in range(config.decoder_layers ): rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') ) # fmt: on return rename_keys def snake_case (A_ :Dict , A_ :List[str] , A_ :str ): '''simple docstring''' a : Tuple = dct.pop(A_ ) a : Tuple = val def snake_case (A_ 
:Optional[Any] , A_ :Tuple ): '''simple docstring''' a : Optional[int] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): a : Tuple = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) a : Union[str, Any] = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' ) a : int = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict a : List[Any] = in_proj_weight[:dim, :] a : Union[str, Any] = in_proj_bias[: dim] a : List[Any] = in_proj_weight[ dim : dim * 2, : ] a : List[str] = in_proj_bias[ dim : dim * 2 ] a : str = in_proj_weight[ -dim :, : ] a : int = in_proj_bias[-dim :] # fmt: on def snake_case (A_ :Any , A_ :Union[str, Any] ): '''simple docstring''' a : Union[str, Any] = config.d_model for i in range(config.decoder_layers ): # read in weights + bias of input projection layer of self-attention a : int = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) a : Any = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict a : Tuple = in_proj_weight[:hidden_size, :] a : Union[str, Any] = in_proj_bias[:hidden_size] a : List[Any] = in_proj_weight[ hidden_size : hidden_size * 2, : ] a : Optional[int] = in_proj_bias[hidden_size : hidden_size * 2] a : List[Any] = in_proj_weight[-hidden_size:, :] a : Optional[Any] = in_proj_bias[-hidden_size:] def snake_case (): '''simple docstring''' a : Dict = 'http://images.cocodataset.org/val2017/000000039769.jpg' a : Tuple = Image.open(requests.get(A_ , stream=A_ ).raw ) return im @torch.no_grad() def snake_case (A_ :int , A_ :int , A_ :str ): '''simple docstring''' a : List[Any] = get_deta_config(A_ ) # load original state dict if model_name == "deta-swin-large": a : Optional[Any] = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' ) elif model_name == "deta-swin-large-o365": a : str = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' ) else: raise ValueError(f'''Model name {model_name} not supported''' ) a : List[Any] = torch.load(A_ , map_location='cpu' )['model'] # original state dict for name, param in state_dict.items(): print(A_ , param.shape ) # rename keys a : int = create_rename_keys(A_ ) for src, dest in rename_keys: rename_key(A_ , A_ , A_ ) read_in_swin_q_k_v(A_ , config.backbone_config ) read_in_decoder_q_k_v(A_ , A_ ) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: a : int = state_dict.pop(A_ ) a : Union[str, Any] = val if "input_proj" in key: a : List[str] = state_dict.pop(A_ ) a : str = val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: a : Union[str, Any] = state_dict.pop(A_ ) a : str = val # finally, create HuggingFace model and load state dict a : int = DetaForObjectDetection(A_ ) model.load_state_dict(A_ ) model.eval() a : int = 'cuda' if torch.cuda.is_available() else 'cpu' model.to(A_ ) # load image processor a : List[str] = DetaImageProcessor(format='coco_detection' ) # verify our conversion on image a : Optional[int] = prepare_img() a : Optional[Any] = processor(images=A_ , 
return_tensors='pt' ) a : List[Any] = encoding['pixel_values'] a : Optional[Any] = model(pixel_values.to(A_ ) ) # verify logits print('Logits:' , outputs.logits[0, :3, :3] ) print('Boxes:' , outputs.pred_boxes[0, :3, :3] ) if model_name == "deta-swin-large": a : int = torch.tensor( [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] ) a : Optional[Any] = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] ) elif model_name == "deta-swin-large-o365": a : Any = torch.tensor( [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] ) a : List[str] = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] ) assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(A_ ) , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(A_ ) , atol=1E-4 ) print('Everything ok!' ) if pytorch_dump_folder_path: # Save model and processor logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' ) Path(A_ ).mkdir(exist_ok=A_ ) model.save_pretrained(A_ ) processor.save_pretrained(A_ ) # Push to hub if push_to_hub: print('Pushing model and processor to hub...' ) model.push_to_hub(f'''jozhang97/{model_name}''' ) processor.push_to_hub(f'''jozhang97/{model_name}''' ) if __name__ == "__main__": _UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument( '--model_name', type=str, default='deta-swin-large', choices=['deta-swin-large', 'deta-swin-large-o365'], help='Name of the model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _UpperCamelCase : str = parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
186
"""simple docstring""" import argparse from collections import defaultdict import yaml _UpperCamelCase : int = 'docs/source/en/_toctree.yml' def snake_case (A_ :Optional[Any] ): '''simple docstring''' a : List[Any] = defaultdict(A_ ) for doc in model_doc: counts[doc["local"]] += 1 a : Optional[Any] = [key for key, value in counts.items() if value > 1] a : List[str] = [] for duplicate_key in duplicates: a : int = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} ) if len(A_ ) > 1: raise ValueError( f'''{duplicate_key} is present several times in the documentation table of content at ''' '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ' 'others.' ) # Only add this once new_doc.append({'local': duplicate_key, 'title': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] ) # Sort return sorted(A_ , key=lambda A_ : s["title"].lower() ) def snake_case (A_ :List[str]=False ): '''simple docstring''' with open(A_ , encoding='utf-8' ) as f: a : Dict = yaml.safe_load(f.read() ) # Get to the API doc a : Optional[Any] = 0 while content[api_idx]["title"] != "API": api_idx += 1 a : List[str] = content[api_idx]['sections'] # Then to the model doc a : Optional[int] = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 a : Optional[Any] = api_doc[model_idx]['sections'] a : Dict = [(idx, section) for idx, section in enumerate(A_ ) if 'sections' in section] a : List[str] = False for idx, modality_doc in modalities_docs: a : str = modality_doc['sections'] a : str = clean_model_doc_toc(A_ ) if old_modality_doc != new_modality_doc: a : str = True if overwrite: a : Any = new_modality_doc if diff: if overwrite: a : Any = model_doc a : str = api_doc with open(A_ , 'w' , encoding='utf-8' ) as f: f.write(yaml.dump(A_ , allow_unicode=A_ ) ) else: raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' ) if __name__ == "__main__": _UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') _UpperCamelCase : Any = parser.parse_args() check_model_doc(args.fix_and_overwrite)
186
1
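A minimal sketch of the key-renaming pattern the DETA conversion script in the row above relies on; the toy state dict and the single rename pair below are hypothetical stand-ins (real checkpoints hold tensors, not ints):

# Hypothetical toy example of the rename_keys / rename_key pattern used above.
toy_state_dict = {"transformer.encoder.layers.0.linear1.weight": 1}
toy_rename_keys = [
    ("transformer.encoder.layers.0.linear1.weight", "model.encoder.layers.0.fc1.weight"),
]
for src, dest in toy_rename_keys:
    toy_state_dict[dest] = toy_state_dict.pop(src)  # move the value under its new key
assert "model.encoder.layers.0.fc1.weight" in toy_state_dict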
'''simple docstring''' import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml __lowerCAmelCase = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): def run_func(_SCREAMING_SNAKE_CASE ): @wraps(_SCREAMING_SNAKE_CASE ) def run_in_eager_mode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): return func(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @wraps(_SCREAMING_SNAKE_CASE ) @tf.function(experimental_compile=_SCREAMING_SNAKE_CASE ) def run_in_graph_mode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): return func(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = random.Random() _snake_case = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(_SCREAMING_SNAKE_CASE , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class _lowerCAmelCase ( __snake_case ): '''simple docstring''' lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 lowerCAmelCase_ = "TensorFlow" @property def lowercase (self ) -> Union[str, Any]: return tf.__version__ def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> float: # initialize GPU on separate process _snake_case = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case = self._prepare_inference_func(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return self._measure_speed(_inference ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> float: _snake_case = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case = self._prepare_train_func(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return self._measure_speed(_train ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> [Memory, Optional[MemorySummary]]: # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCAmelCase ) _snake_case = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case = self._prepare_inference_func(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return self._measure_memory(_inference ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> [Memory, Optional[MemorySummary]]: if self.args.is_gpu: 
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCAmelCase ) _snake_case = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case = self._prepare_train_func(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return self._measure_memory(_train ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Callable[[], None]: _snake_case = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _snake_case = ( hasattr(UpperCAmelCase , """architectures""" ) and isinstance(config.architectures , UpperCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _snake_case = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _snake_case = __import__("""transformers""" , fromlist=[model_class] ) _snake_case = getattr(UpperCAmelCase , UpperCAmelCase ) _snake_case = model_cls(UpperCAmelCase ) except ImportError: raise ImportError( f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _snake_case = TF_MODEL_MAPPING[config.__class__](UpperCAmelCase ) # encoder-decoder has vocab size saved differently _snake_case = config.vocab_size if hasattr(UpperCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _snake_case = random_input_ids(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(UpperCAmelCase , decoder_input_ids=UpperCAmelCase , training=UpperCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(UpperCAmelCase , training=UpperCAmelCase ) _snake_case = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Callable[[], None]: _snake_case = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _snake_case = ( hasattr(UpperCAmelCase , """architectures""" ) and isinstance(config.architectures , UpperCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _snake_case = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _snake_case = __import__("""transformers""" , fromlist=[model_class] ) _snake_case = getattr(UpperCAmelCase , UpperCAmelCase ) _snake_case = model_cls(UpperCAmelCase ) except ImportError: raise ImportError( f"""{model_class} does not exist. 
If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _snake_case = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](UpperCAmelCase ) # encoder-decoder has vocab size saved differently _snake_case = config.vocab_size if hasattr(UpperCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _snake_case = random_input_ids(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): _snake_case = model(UpperCAmelCase , decoder_input_ids=UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase )[0] _snake_case = tf.gradients(UpperCAmelCase , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): _snake_case = model(UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase )[0] _snake_case = tf.gradients(UpperCAmelCase , model.trainable_variables ) return gradients _snake_case = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def lowercase (self , UpperCAmelCase ) -> float: with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(UpperCAmelCase , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _snake_case = timeit.repeat( UpperCAmelCase , repeat=self.args.repeat , number=10 , ) return min(UpperCAmelCase ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f"""Doesn't fit on GPU. {e}""" ) def lowercase (self , UpperCAmelCase ) -> [Memory, MemorySummary]: logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _snake_case = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _snake_case = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _snake_case = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _snake_case = nvml.nvmlDeviceGetMemoryInfo(UpperCAmelCase ) _snake_case = meminfo.used _snake_case = Memory(UpperCAmelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _snake_case = None else: _snake_case = measure_peak_memory_cpu(UpperCAmelCase ) _snake_case = Memory(UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else memory_bytes if self.args.trace_memory_line_by_line: _snake_case = stop_memory_tracing(UpperCAmelCase ) if memory is None: _snake_case = summary.total else: _snake_case = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f"""Doesn't fit on GPU. {e}""" ) return "N/A", None
341
from math import factorial, radians


def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin of an angle given in degrees using the Taylor (Maclaurin) series."""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
341
1
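The Taylor-series sine in the context column above can be sanity-checked against math.sin; a minimal sketch, assuming the function is importable as `sin` from that snippet (the harness itself is hypothetical):

from math import radians, sin as math_sin

# Compare the series approximation with the library value at a few angles (degrees).
for angle in (0.0, 30.0, 90.0, 123.4):
    approx = sin(angle)  # the Taylor-series implementation above
    exact = round(math_sin(radians(angle)), 10)
    assert abs(approx - exact) < 1e-9, (angle, approx, exact)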
def palindromic_string(input_string: str) -> str:
    """Return the longest palindromic substring of input_string using Manacher's algorithm."""
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
359
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
173
0
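A quick property check for the Manacher implementation in the code column above; a sketch assuming it is importable as `palindromic_string` (the random harness is hypothetical):

from random import choice, randint


def _is_palindrome(s: str) -> bool:
    return s == s[::-1]


# Every returned substring must occur in the input and read the same both ways.
for _ in range(200):
    text = "".join(choice("ab") for _ in range(randint(1, 15)))
    longest = palindromic_string(text)
    assert longest in text and _is_palindrome(longest), (text, longest)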
"""simple docstring""" import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () _snake_case = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). _snake_case = [0, 25, 50] _snake_case = [25, 50, 75] _snake_case = fuzz.membership.trimf(X, abca) _snake_case = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. _snake_case = np.ones(75) _snake_case = np.zeros((75,)) # 1. Union = max(µA(x), µB(x)) _snake_case = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) _snake_case = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) _snake_case = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) _snake_case = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] _snake_case = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) _snake_case = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] _snake_case = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] _snake_case = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title('Young') plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title('Middle aged') plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title('union') plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title('intersection') plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title('complement_a') plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title('difference a/b') plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title('alg_sum') plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title('alg_product') plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title('bdd_sum') plt.grid(True) plt.subplot(4, 3, 10) plt.plot(X, bdd_difference) plt.title('bdd_difference') plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
294
"""simple docstring""" import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor _snake_case = logging.get_logger(__name__) class UpperCamelCase ( snake_case_ ): def __init__( self : Any , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Tuple ) -> None: warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""" , UpperCAmelCase__ , ) super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
294
1
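The context column above only wraps CLIPImageProcessor in a deprecation warning; a minimal migration sketch (the checkpoint name is the usual public example, not taken from this row):

from transformers import CLIPImageProcessor

# Use the replacement class directly instead of the deprecated CLIPFeatureExtractor.
image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")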
'''simple docstring''' import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class UpperCamelCase__( unittest.TestCase ): def __init__( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[Any]=7 , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : int=18 , lowerCAmelCase : Optional[int]=30 , lowerCAmelCase : str=400 , lowerCAmelCase : Tuple=True , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Dict=True , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Any=[0.5, 0.5, 0.5] , lowerCAmelCase : Optional[int]=[0.5, 0.5, 0.5] , lowerCAmelCase : Tuple=False , )-> List[Any]: """simple docstring""" UpperCAmelCase = size if size is not None else {'''height''': 20, '''width''': 20} UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = num_channels UpperCAmelCase = image_size UpperCAmelCase = min_resolution UpperCAmelCase = max_resolution UpperCAmelCase = do_resize UpperCAmelCase = size UpperCAmelCase = do_center_crop UpperCAmelCase = crop_size UpperCAmelCase = do_normalize UpperCAmelCase = image_mean UpperCAmelCase = image_std UpperCAmelCase = do_reduce_labels def a__( self : Any )-> Tuple: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) UpperCAmelCase = Image.open(dataset[0]['''file'''] ) UpperCAmelCase = Image.open(dataset[1]['''file'''] ) return image, map def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) UpperCAmelCase = Image.open(ds[0]['''file'''] ) UpperCAmelCase = Image.open(ds[1]['''file'''] ) UpperCAmelCase = Image.open(ds[2]['''file'''] ) UpperCAmelCase = Image.open(ds[3]['''file'''] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ): __magic_name__ : Tuple = BeitImageProcessor if is_vision_available() else None def a__( self : Optional[int] )-> Union[str, Any]: """simple docstring""" UpperCAmelCase = BeitImageProcessingTester(self ) @property def a__( self : Optional[int] )-> Tuple: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def a__( self : Union[str, Any] )-> Optional[Any]: """simple docstring""" UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(lowerCAmelCase , '''size''' ) ) self.assertTrue(hasattr(lowerCAmelCase , '''do_center_crop''' ) ) self.assertTrue(hasattr(lowerCAmelCase , '''center_crop''' ) ) self.assertTrue(hasattr(lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(lowerCAmelCase , '''image_mean''' ) ) 
self.assertTrue(hasattr(lowerCAmelCase , '''image_std''' ) ) def a__( self : Optional[int] )-> Union[str, Any]: """simple docstring""" UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) self.assertEqual(image_processor.do_reduce_labels , lowerCAmelCase ) UpperCAmelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=lowerCAmelCase ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) self.assertEqual(image_processor.do_reduce_labels , lowerCAmelCase ) def a__( self : Tuple )-> Optional[int]: """simple docstring""" pass def a__( self : Optional[Any] )-> List[Any]: """simple docstring""" UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase , Image.Image ) # Test not batched input UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched UpperCAmelCase = image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def a__( self : Optional[Any] )-> int: """simple docstring""" UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase , np.ndarray ) # Test not batched input UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched UpperCAmelCase = image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def a__( self : Any )-> Dict: """simple docstring""" UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase , torch.Tensor ) # Test not batched input UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], 
self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched UpperCAmelCase = image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def a__( self : int )-> Optional[Any]: """simple docstring""" UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase ) UpperCAmelCase = [] for image in image_inputs: self.assertIsInstance(lowerCAmelCase , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input UpperCAmelCase = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) # Test batched UpperCAmelCase = image_processing(lowerCAmelCase , lowerCAmelCase , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) # Test not batched input (PIL images) UpperCAmelCase , UpperCAmelCase = prepare_semantic_single_inputs() UpperCAmelCase = image_processing(lowerCAmelCase , lowerCAmelCase , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) # Test batched input (PIL images) UpperCAmelCase , UpperCAmelCase = prepare_semantic_batch_inputs() UpperCAmelCase = image_processing(lowerCAmelCase , lowerCAmelCase , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 2, self.image_processor_tester.crop_size['''height'''], 
self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) def a__( self : Union[str, Any] )-> Tuple: """simple docstring""" UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 UpperCAmelCase , UpperCAmelCase = prepare_semantic_single_inputs() UpperCAmelCase = image_processing(lowerCAmelCase , lowerCAmelCase , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 150 ) UpperCAmelCase = True UpperCAmelCase = image_processing(lowerCAmelCase , lowerCAmelCase , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 )
369
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class UpperCamelCase__( nn.Module ): __magic_name__ : int __magic_name__ : int __magic_name__ : float = 0.0 __magic_name__ : int = 1 __magic_name__ : int = 1 __magic_name__ : bool = True __magic_name__ : bool = False __magic_name__ : bool = False __magic_name__ : bool = False __magic_name__ : jnp.dtype = jnp.floataa def a__( self : str )-> Dict: """simple docstring""" UpperCAmelCase = [] UpperCAmelCase = [] for i in range(self.num_layers ): UpperCAmelCase = self.in_channels if i == 0 else self.out_channels UpperCAmelCase = FlaxResnetBlockaD( in_channels=lowerCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowerCAmelCase ) UpperCAmelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(lowerCAmelCase ) UpperCAmelCase = resnets UpperCAmelCase = attentions if self.add_downsample: UpperCAmelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : int , lowerCAmelCase : Dict , lowerCAmelCase : List[str]=True )-> Optional[int]: """simple docstring""" UpperCAmelCase = () for resnet, attn in zip(self.resnets , self.attentions ): UpperCAmelCase = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase ) UpperCAmelCase = attn(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase ) output_states += (hidden_states,) if self.add_downsample: UpperCAmelCase = self.downsamplers_a(lowerCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class UpperCamelCase__( nn.Module ): __magic_name__ : int __magic_name__ : int __magic_name__ : float = 0.0 __magic_name__ : int = 1 __magic_name__ : bool = True __magic_name__ : jnp.dtype = jnp.floataa def a__( self : List[str] )-> Any: """simple docstring""" UpperCAmelCase = [] for i in range(self.num_layers ): UpperCAmelCase = self.in_channels if i == 0 else self.out_channels UpperCAmelCase = FlaxResnetBlockaD( in_channels=lowerCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowerCAmelCase ) UpperCAmelCase = resnets if self.add_downsample: UpperCAmelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str]=True )-> Optional[int]: """simple docstring""" UpperCAmelCase = () for resnet in self.resnets: UpperCAmelCase = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase ) output_states += (hidden_states,) if self.add_downsample: UpperCAmelCase = self.downsamplers_a(lowerCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class UpperCamelCase__( nn.Module ): __magic_name__ : int __magic_name__ : int __magic_name__ : int __magic_name__ : float = 0.0 __magic_name__ : int = 1 __magic_name__ : int = 1 __magic_name__ : bool = True __magic_name__ : bool = False __magic_name__ : bool = False __magic_name__ : bool = False __magic_name__ : jnp.dtype = jnp.floataa def 
a__( self : List[str] )-> Tuple: """simple docstring""" UpperCAmelCase = [] UpperCAmelCase = [] for i in range(self.num_layers ): UpperCAmelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels UpperCAmelCase = self.prev_output_channel if i == 0 else self.out_channels UpperCAmelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowerCAmelCase ) UpperCAmelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(lowerCAmelCase ) UpperCAmelCase = resnets UpperCAmelCase = attentions if self.add_upsample: UpperCAmelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any]=True )-> Optional[int]: """simple docstring""" for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states UpperCAmelCase = res_hidden_states_tuple[-1] UpperCAmelCase = res_hidden_states_tuple[:-1] UpperCAmelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) UpperCAmelCase = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase ) UpperCAmelCase = attn(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase ) if self.add_upsample: UpperCAmelCase = self.upsamplers_a(lowerCAmelCase ) return hidden_states class UpperCamelCase__( nn.Module ): __magic_name__ : int __magic_name__ : int __magic_name__ : int __magic_name__ : float = 0.0 __magic_name__ : int = 1 __magic_name__ : bool = True __magic_name__ : jnp.dtype = jnp.floataa def a__( self : Optional[int] )-> str: """simple docstring""" UpperCAmelCase = [] for i in range(self.num_layers ): UpperCAmelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels UpperCAmelCase = self.prev_output_channel if i == 0 else self.out_channels UpperCAmelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowerCAmelCase ) UpperCAmelCase = resnets if self.add_upsample: UpperCAmelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Union[str, Any] , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Dict=True )-> Tuple: """simple docstring""" for resnet in self.resnets: # pop res hidden states UpperCAmelCase = res_hidden_states_tuple[-1] UpperCAmelCase = res_hidden_states_tuple[:-1] UpperCAmelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) UpperCAmelCase = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase ) if self.add_upsample: UpperCAmelCase = self.upsamplers_a(lowerCAmelCase ) return hidden_states class UpperCamelCase__( nn.Module ): __magic_name__ : int __magic_name__ : float = 0.0 __magic_name__ : int = 1 __magic_name__ : int = 1 __magic_name__ : bool = False __magic_name__ : bool = False __magic_name__ : jnp.dtype = jnp.floataa def a__( self : int )-> Optional[int]: """simple docstring""" UpperCAmelCase = [ FlaxResnetBlockaD( 
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] UpperCAmelCase = [] for _ in range(self.num_layers ): UpperCAmelCase = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(lowerCAmelCase ) UpperCAmelCase = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowerCAmelCase ) UpperCAmelCase = resnets UpperCAmelCase = attentions def __call__( self : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : Any=True )-> Union[str, Any]: """simple docstring""" UpperCAmelCase = self.resnets[0](lowerCAmelCase , lowerCAmelCase ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): UpperCAmelCase = attn(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase ) UpperCAmelCase = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase ) return hidden_states
91
0
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")

            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)

    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096

    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
125
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
125
1
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __lowercase = logging.get_logger(__name__) class _lowercase ( __a ): """simple docstring""" lowercase__ = '''upernet''' def __init__( self : List[str] , UpperCamelCase__ : int=None , UpperCamelCase__ : Any=512 , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : int=[1, 2, 3, 6] , UpperCamelCase__ : str=True , UpperCamelCase__ : int=0.4 , UpperCamelCase__ : int=384 , UpperCamelCase__ : int=256 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Union[str, Any]=255 , **UpperCamelCase__ : Dict , ) -> str: '''simple docstring''' super().__init__(**UpperCamelCase__ ) if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) __UpperCamelCase =CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): __UpperCamelCase =backbone_config.get('''model_type''' ) __UpperCamelCase =CONFIG_MAPPING[backbone_model_type] __UpperCamelCase =config_class.from_dict(UpperCamelCase__ ) __UpperCamelCase =backbone_config __UpperCamelCase =hidden_size __UpperCamelCase =initializer_range __UpperCamelCase =pool_scales __UpperCamelCase =use_auxiliary_head __UpperCamelCase =auxiliary_loss_weight __UpperCamelCase =auxiliary_in_channels __UpperCamelCase =auxiliary_channels __UpperCamelCase =auxiliary_num_convs __UpperCamelCase =auxiliary_concat_input __UpperCamelCase =loss_ignore_index def UpperCAmelCase_ ( self : Dict ) -> Tuple: '''simple docstring''' __UpperCamelCase =copy.deepcopy(self.__dict__ ) __UpperCamelCase =self.backbone_config.to_dict() __UpperCamelCase =self.__class__.model_type return output
85
"""simple docstring""" def lowerCAmelCase (__UpperCamelCase : int = 1_0_0_0 ): """simple docstring""" __UpperCamelCase =-1 __UpperCamelCase =0 for a in range(1 , n // 3 ): # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c __UpperCamelCase =(n * n - 2 * a * n) // (2 * n - 2 * a) __UpperCamelCase =n - a - b if c * c == (a * a + b * b): __UpperCamelCase =a * b * c if candidate >= product: __UpperCamelCase =candidate return product if __name__ == "__main__": print(f'''{solution() = }''')
85
1
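A tiny check of the triplet search in the context column above; a sketch assuming the function is importable as `solution` (the 3-4-5 case is easy to verify by hand):

# a=3, b=4, c=5 is the only Pythagorean triplet with a + b + c == 12, so the product is 60.
assert solution(12) == 60
# For the default n == 1000 the search yields the well-known Project Euler #9 answer.
assert solution(1000) == 31875000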
from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class _lowerCamelCase : """simple docstring""" def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="None" , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , )->Dict: '''simple docstring''' A_ : int = parent A_ : Union[str, Any] = batch_size A_ : List[Any] = seq_length A_ : Dict = is_training A_ : Optional[Any] = use_input_mask A_ : int = use_token_type_ids A_ : Optional[int] = use_labels A_ : Union[str, Any] = vocab_size A_ : Tuple = hidden_size A_ : List[str] = num_hidden_layers A_ : str = num_attention_heads A_ : Optional[Any] = intermediate_size A_ : int = hidden_act A_ : Dict = hidden_dropout_prob A_ : Optional[Any] = attention_probs_dropout_prob A_ : Dict = max_position_embeddings A_ : Dict = type_vocab_size A_ : Optional[int] = type_sequence_label_size A_ : int = initializer_range A_ : int = num_labels A_ : Dict = num_choices A_ : int = relative_attention A_ : List[str] = position_biased_input A_ : List[Any] = pos_att_type A_ : List[str] = scope def _snake_case ( self )->Any: '''simple docstring''' A_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : Optional[int] = None if self.use_input_mask: A_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) A_ : Any = None if self.use_token_type_ids: A_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ : List[str] = None A_ : List[str] = None A_ : str = None if self.use_labels: A_ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ : Optional[Any] = DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_SCREAMING_SNAKE_CASE , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _snake_case ( 
self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Dict: '''simple docstring''' A_ : Dict = TFDebertaVaModel(config=_SCREAMING_SNAKE_CASE ) A_ : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A_ : str = [input_ids, input_mask] A_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE ) A_ : Tuple = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Any: '''simple docstring''' A_ : Dict = TFDebertaVaForMaskedLM(config=_SCREAMING_SNAKE_CASE ) A_ : Optional[int] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } A_ : Tuple = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Union[str, Any]: '''simple docstring''' A_ : List[Any] = self.num_labels A_ : int = TFDebertaVaForSequenceClassification(config=_SCREAMING_SNAKE_CASE ) A_ : Dict = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } A_ : Dict = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Optional[Any]: '''simple docstring''' A_ : str = self.num_labels A_ : List[Any] = TFDebertaVaForTokenClassification(config=_SCREAMING_SNAKE_CASE ) A_ : List[Any] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } A_ : List[Any] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Optional[int]: '''simple docstring''' A_ : List[Any] = TFDebertaVaForQuestionAnswering(config=_SCREAMING_SNAKE_CASE ) A_ : Any = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } A_ : int = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self )->Union[str, Any]: '''simple docstring''' A_ : Union[str, Any] = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : str = config_and_inputs A_ : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class _lowerCamelCase ( UpperCamelCase , UpperCamelCase , unittest.TestCase ): """simple docstring""" snake_case = ( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, 
TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) snake_case = ( { "feature-extraction": TFDebertaVaModel, "fill-mask": TFDebertaVaForMaskedLM, "question-answering": TFDebertaVaForQuestionAnswering, "text-classification": TFDebertaVaForSequenceClassification, "token-classification": TFDebertaVaForTokenClassification, "zero-shot": TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) snake_case = False snake_case = False def _snake_case ( self )->str: '''simple docstring''' A_ : Dict = TFDebertaVaModelTester(self ) A_ : Optional[int] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 ) def _snake_case ( self )->Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _snake_case ( self )->Tuple: '''simple docstring''' A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def _snake_case ( self )->Optional[Any]: '''simple docstring''' A_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE ) def _snake_case ( self )->Union[str, Any]: '''simple docstring''' A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE ) def _snake_case ( self )->Tuple: '''simple docstring''' A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE ) def _snake_case ( self )->Optional[int]: '''simple docstring''' A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE ) @slow def _snake_case ( self )->Dict: '''simple docstring''' A_ : List[Any] = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) @require_tf class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" @unittest.skip(reason='''Model not available yet''' ) def _snake_case ( self )->Optional[Any]: '''simple docstring''' pass @slow def _snake_case ( self )->Tuple: '''simple docstring''' A_ : List[str] = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) A_ : Union[str, Any] = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) A_ : int = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) A_ : str = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )[0] A_ : List[str] = tf.constant( [[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
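# Standalone sketch of the inference pattern the integration test above checks.
# The public class name is assumed to be TFDebertaV2Model (the mangled source
# calls it TFDebertaVaModel); requires TensorFlow and network access:
#
#   model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
#   output = model(input_ids, attention_mask=attention_mask)[0]
#   # output[:, 1:4, 1:4] is compared against fixed values with atol=1e-4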
186
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { """vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""", # See all GLPN models at https://huggingface.co/models?filter=glpn } class _lowerCamelCase ( UpperCamelCase ): """simple docstring""" snake_case = "glpn" def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=[2, 2, 2, 2] , _SCREAMING_SNAKE_CASE=[8, 4, 2, 1] , _SCREAMING_SNAKE_CASE=[32, 64, 160, 256] , _SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , _SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , _SCREAMING_SNAKE_CASE=[1, 2, 5, 8] , _SCREAMING_SNAKE_CASE=[4, 4, 4, 4] , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-6 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=-1 , **_SCREAMING_SNAKE_CASE , )->Any: '''simple docstring''' super().__init__(**_SCREAMING_SNAKE_CASE ) A_ : Optional[int] = num_channels A_ : Union[str, Any] = num_encoder_blocks A_ : int = depths A_ : Dict = sr_ratios A_ : Any = hidden_sizes A_ : int = patch_sizes A_ : Optional[int] = strides A_ : str = mlp_ratios A_ : List[str] = num_attention_heads A_ : str = hidden_act A_ : int = hidden_dropout_prob A_ : List[Any] = attention_probs_dropout_prob A_ : Optional[Any] = initializer_range A_ : Tuple = drop_path_rate A_ : Optional[int] = layer_norm_eps A_ : List[str] = decoder_hidden_size A_ : List[Any] = max_depth A_ : List[Any] = head_in_index
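# Minimal usage sketch for the configuration above. The public class name is
# assumed to be GLPNConfig (the mangled source hides it); the asserted defaults
# are taken verbatim from the __init__ signature above.
from transformers import GLPNConfig

config = GLPNConfig()
assert config.model_type == "glpn"
assert config.hidden_sizes == [32, 64, 160, 256]
assert config.decoder_hidden_size == 64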
186
1
import argparse import os import shutil from pathlib import Path import onnx import torch from packaging import version from torch.onnx import export from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline _A = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''') def __UpperCamelCase ( _A , _A , _A , _A , _A , _A , _A , _A=False , ): output_path.parent.mkdir(parents=_A , exist_ok=_A ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( _A , _A , f=output_path.as_posix() , input_names=_A , output_names=_A , dynamic_axes=_A , do_constant_folding=_A , use_external_data_format=_A , enable_onnx_checker=_A , opset_version=_A , ) else: export( _A , _A , f=output_path.as_posix() , input_names=_A , output_names=_A , dynamic_axes=_A , do_constant_folding=_A , opset_version=_A , ) @torch.no_grad() def __UpperCamelCase ( _A , _A , _A , _A = False ): lowerCAmelCase_ = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): lowerCAmelCase_ = '''cuda''' elif fpaa and not torch.cuda.is_available(): raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' ) else: lowerCAmelCase_ = '''cpu''' lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(_A , torch_dtype=_A ).to(_A ) lowerCAmelCase_ = Path(_A ) # TEXT ENCODER lowerCAmelCase_ = pipeline.text_encoder.config.max_position_embeddings lowerCAmelCase_ = pipeline.text_encoder.config.hidden_size lowerCAmelCase_ = pipeline.tokenizer( '''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=_A , return_tensors='''pt''' , ) onnx_export( pipeline.text_encoder , model_args=(text_input.input_ids.to(device=_A , dtype=torch.intaa )) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={ '''input_ids''': {0: '''batch''', 1: '''sequence'''}, } , opset=_A , ) del pipeline.text_encoder # UNET lowerCAmelCase_ = pipeline.unet.config.in_channels lowerCAmelCase_ = pipeline.unet.config.sample_size lowerCAmelCase_ = output_path / '''unet''' / '''model.onnx''' onnx_export( pipeline.unet , model_args=( torch.randn(2 , _A , _A , _A ).to(device=_A , dtype=_A ), torch.randn(2 ).to(device=_A , dtype=_A ), torch.randn(2 , _A , _A ).to(device=_A , dtype=_A ), False, ) , output_path=_A , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={ '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, '''timestep''': {0: '''batch'''}, '''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''}, } , opset=_A , use_external_data_format=_A , ) lowerCAmelCase_ = str(unet_path.absolute().as_posix() ) lowerCAmelCase_ = os.path.dirname(_A ) lowerCAmelCase_ = onnx.load(_A ) # clean up existing tensor files shutil.rmtree(_A ) os.mkdir(_A ) # collate external tensor files into one onnx.save_model( _A , _A , save_as_external_data=_A , all_tensors_to_one_file=_A , location='''weights.pb''' , convert_attribute=_A , ) del pipeline.unet # VAE ENCODER lowerCAmelCase_ = pipeline.vae lowerCAmelCase_ = vae_encoder.config.in_channels lowerCAmelCase_ = vae_encoder.config.sample_size # need to get the raw tensor output (sample) from the 
encoder lowerCAmelCase_ = lambda _A , _A : vae_encoder.encode(_A , _A )[0].sample() onnx_export( _A , model_args=( torch.randn(1 , _A , _A , _A ).to(device=_A , dtype=_A ), False, ) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={ '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, } , opset=_A , ) # VAE DECODER lowerCAmelCase_ = pipeline.vae lowerCAmelCase_ = vae_decoder.config.latent_channels lowerCAmelCase_ = vae_decoder.config.out_channels # forward only through the decoder part lowerCAmelCase_ = vae_encoder.decode onnx_export( _A , model_args=( torch.randn(1 , _A , _A , _A ).to(device=_A , dtype=_A ), False, ) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={ '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, } , opset=_A , ) del pipeline.vae # SAFETY CHECKER if pipeline.safety_checker is not None: lowerCAmelCase_ = pipeline.safety_checker lowerCAmelCase_ = safety_checker.config.vision_config.num_channels lowerCAmelCase_ = safety_checker.config.vision_config.image_size lowerCAmelCase_ = safety_checker.forward_onnx onnx_export( pipeline.safety_checker , model_args=( torch.randn( 1 , _A , _A , _A , ).to(device=_A , dtype=_A ), torch.randn(1 , _A , _A , _A ).to(device=_A , dtype=_A ), ) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={ '''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, '''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''}, } , opset=_A , ) del pipeline.safety_checker lowerCAmelCase_ = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''' ) lowerCAmelCase_ = pipeline.feature_extractor else: lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = OnnxStableDiffusionPipeline( vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''' ) , scheduler=pipeline.scheduler , safety_checker=_A , feature_extractor=_A , requires_safety_checker=safety_checker is not None , ) onnx_pipeline.save_pretrained(_A ) print('''ONNX pipeline saved to''' , _A ) del pipeline del onnx_pipeline lowerCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(_A , provider='''CPUExecutionProvider''' ) print('''ONNX pipeline is loadable''' ) if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument( '''--model_path''', type=str, required=True, help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''', ) parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''') parser.add_argument( '''--opset''', default=14, type=int, help='''The version of the ONNX operator set to use.''', ) parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''') _A = parser.parse_args() convert_models(args.model_path, args.output_path, 
args.opset, args.fpaa)
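# Example invocation of the conversion script above (a sketch: the script file
# name and the checkpoint id are placeholders, not verified paths):
#
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./sd_onnx \
#       --opset 14
#
# Add --fp16 only on a CUDA machine; the script raises a ValueError otherwise.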
356
from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _A = logging.get_logger(__name__) class A ( __UpperCAmelCase ): __snake_case = ['pixel_values'] def __init__( self, UpperCamelCase__ = True, UpperCamelCase__ = 32, UpperCamelCase__=PILImageResampling.BILINEAR, UpperCamelCase__ = True, **UpperCamelCase__, ): """simple docstring""" lowerCAmelCase_ = do_resize lowerCAmelCase_ = do_rescale lowerCAmelCase_ = size_divisor lowerCAmelCase_ = resample super().__init__(**UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ = None, **UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ , lowerCAmelCase_ = get_image_size(UpperCamelCase__ ) # Rounds the height and width down to the closest multiple of size_divisor lowerCAmelCase_ = height // size_divisor * size_divisor lowerCAmelCase_ = width // size_divisor * size_divisor lowerCAmelCase_ = resize(UpperCamelCase__, (new_h, new_w), resample=UpperCamelCase__, data_format=UpperCamelCase__, **UpperCamelCase__ ) return image def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ = None, **UpperCamelCase__ ): """simple docstring""" return rescale(image=UpperCamelCase__, scale=UpperCamelCase__, data_format=UpperCamelCase__, **UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__=None, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__ = ChannelDimension.FIRST, **UpperCamelCase__, ): """simple docstring""" lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase_ = size_divisor if size_divisor is not None else self.size_divisor lowerCAmelCase_ = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError('''size_divisor is required for resizing''' ) lowerCAmelCase_ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError('''Invalid image(s)''' ) # All transformations expect numpy arrays. lowerCAmelCase_ = [to_numpy_array(UpperCamelCase__ ) for img in images] if do_resize: lowerCAmelCase_ = [self.resize(UpperCamelCase__, size_divisor=UpperCamelCase__, resample=UpperCamelCase__ ) for image in images] if do_rescale: lowerCAmelCase_ = [self.rescale(UpperCamelCase__, scale=1 / 255 ) for image in images] lowerCAmelCase_ = [to_channel_dimension_format(UpperCamelCase__, UpperCamelCase__ ) for image in images] lowerCAmelCase_ = {'''pixel_values''': images} return BatchFeature(data=UpperCamelCase__, tensor_type=UpperCamelCase__ )
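# A quick standalone check of the size-divisor rounding used in `resize` above:
# height and width are floored to the nearest multiple of `size_divisor`.
def round_down(dim: int, size_divisor: int = 32) -> int:
    return dim // size_divisor * size_divisor

assert round_down(250) == 224  # 7 * 32
assert round_down(333) == 320  # 10 * 32
assert round_down(224) == 224  # already a multiple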
167
0
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _lowerCAmelCase : Tuple = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "ctc_proj", "mask_emb": "masked_spec_embed", } _lowerCAmelCase : Optional[int] = [ "ctc_proj", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def lowerCAmelCase ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Dict ): """simple docstring""" for attribute in key.split("." ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models UpperCAmelCase__ = """lm_head""" UpperCAmelCase__ = getattr(_lowerCAmelCase , _lowerCAmelCase ) if weight_type is not None: UpperCAmelCase__ = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape else: UpperCAmelCase__ = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": UpperCAmelCase__ = value elif weight_type == "weight_g": UpperCAmelCase__ = value elif weight_type == "weight_v": UpperCAmelCase__ = value elif weight_type == "bias": UpperCAmelCase__ = value else: UpperCAmelCase__ = value logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def lowerCAmelCase ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str ): """simple docstring""" UpperCAmelCase__ = [] UpperCAmelCase__ = fairseq_model.state_dict() UpperCAmelCase__ = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase__ = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , ) UpperCAmelCase__ = True else: for key, mapped_key in MAPPING.items(): UpperCAmelCase__ = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: UpperCAmelCase__ = True if "*" in mapped_key: UpperCAmelCase__ = name.split(_lowerCAmelCase )[0].split("." 
)[-2] UpperCAmelCase__ = mapped_key.replace("*" , _lowerCAmelCase ) if "weight_g" in name: UpperCAmelCase__ = """weight_g""" elif "weight_v" in name: UpperCAmelCase__ = """weight_v""" elif "bias" in name: UpperCAmelCase__ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase__ = """weight""" else: UpperCAmelCase__ = None set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowerCAmelCase ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = full_name.split("conv_layers." )[-1] UpperCAmelCase__ = name.split("." ) UpperCAmelCase__ = int(items[0] ) UpperCAmelCase__ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) UpperCAmelCase__ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_lowerCAmelCase ) @torch.no_grad() def lowerCAmelCase ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : str=True ): """simple docstring""" if config_path is not None: UpperCAmelCase__ = UniSpeechConfig.from_pretrained(_lowerCAmelCase ) else: UpperCAmelCase__ = UniSpeechConfig() if is_finetuned: if dict_path: UpperCAmelCase__ = Dictionary.load_from_json(_lowerCAmelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCAmelCase__ = target_dict.pad_index UpperCAmelCase__ = target_dict.bos_index UpperCAmelCase__ = target_dict.eos_index UpperCAmelCase__ = len(target_dict.symbols ) UpperCAmelCase__ = os.path.join(_lowerCAmelCase , "vocab.json" ) if not os.path.isdir(_lowerCAmelCase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCAmelCase ) ) return os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase ) UpperCAmelCase__ = target_dict.indices # fairseq has the <pad> and <s> switched UpperCAmelCase__ = 42 UpperCAmelCase__ = 43 with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(_lowerCAmelCase , _lowerCAmelCase ) UpperCAmelCase__ = WavaVecaPhonemeCTCTokenizer( _lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCAmelCase , ) UpperCAmelCase__ = True if config.feat_extract_norm == """layer""" else False UpperCAmelCase__ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ) UpperCAmelCase__ = WavaVecaProcessor(feature_extractor=_lowerCAmelCase , tokenizer=_lowerCAmelCase ) processor.save_pretrained(_lowerCAmelCase ) UpperCAmelCase__ = UniSpeechForCTC(_lowerCAmelCase ) else: UpperCAmelCase__ = UniSpeechForPreTraining(_lowerCAmelCase ) if is_finetuned: UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} ) else: UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) UpperCAmelCase__ = model[0].eval() recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) hf_unispeech.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": _lowerCAmelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( 
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) _lowerCAmelCase : List[Any] = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
169
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin _UpperCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""") _UpperCAmelCase = {"""target_lang""": """fi""", """source_lang""": """en"""} _UpperCAmelCase = """>>zh<<""" _UpperCAmelCase = """Helsinki-NLP/""" if is_torch_available(): _UpperCAmelCase = """pt""" elif is_tf_available(): _UpperCAmelCase = """tf""" else: _UpperCAmelCase = """jax""" @require_sentencepiece class a ( UpperCAmelCase__ , unittest.TestCase ): UpperCamelCase : Any = MarianTokenizer UpperCamelCase : List[Any] = False UpperCamelCase : Optional[Any] = True def lowerCamelCase__ ( self : Optional[Any] ) -> int: '''simple docstring''' super().setUp() SCREAMING_SNAKE_CASE_: str =["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] SCREAMING_SNAKE_CASE_: List[Any] =dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) ) SCREAMING_SNAKE_CASE_: Optional[int] =Path(self.tmpdirname ) save_json(lowerCAmelCase , save_dir / VOCAB_FILES_NAMES["""vocab"""] ) save_json(lowerCAmelCase , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(lowerCAmelCase , save_dir / VOCAB_FILES_NAMES["""source_spm"""] ) copyfile(lowerCAmelCase , save_dir / VOCAB_FILES_NAMES["""target_spm"""] ) SCREAMING_SNAKE_CASE_: str =MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self : str , **lowerCAmelCase : Any ) -> MarianTokenizer: '''simple docstring''' return MarianTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase ) def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[str] ) -> int: '''simple docstring''' return ( "This is a test", "This is a test", ) def lowerCamelCase__ ( self : Any ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE_: List[str] ="""</s>""" SCREAMING_SNAKE_CASE_: List[str] =0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ) , lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ) , lowerCAmelCase ) def lowerCamelCase__ ( self : str ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Dict =list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """</s>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(lowerCAmelCase ) , 9 ) def lowerCamelCase__ ( self : Dict ) -> Tuple: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def lowerCamelCase__ ( self : Union[str, Any] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE_: Optional[Any] =MarianTokenizer.from_pretrained(f'''{ORG_NAME}opus-mt-en-de''' ) SCREAMING_SNAKE_CASE_: List[Any] =en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) SCREAMING_SNAKE_CASE_: int =[38, 121, 14, 697, 3_8848, 0] self.assertListEqual(lowerCAmelCase , batch.input_ids[0] ) 
SCREAMING_SNAKE_CASE_: Optional[int] =tempfile.mkdtemp() en_de_tokenizer.save_pretrained(lowerCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] =[x.name for x in Path(lowerCAmelCase ).glob("""*""" )] self.assertIn("""source.spm""" , lowerCAmelCase ) MarianTokenizer.from_pretrained(lowerCAmelCase ) def lowerCamelCase__ ( self : int ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Tuple =self.get_tokenizer() SCREAMING_SNAKE_CASE_: str =tok( ["""I am a small frog""" * 1000, """I am a small frog"""] , padding=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors=lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertEqual(batch.input_ids.shape , (2, 512) ) def lowerCamelCase__ ( self : Optional[int] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE_: int =self.get_tokenizer() SCREAMING_SNAKE_CASE_: int =tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowerCAmelCase , return_tensors=lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def lowerCamelCase__ ( self : str ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE_: Tuple ={"""input_ids""": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , ) def lowerCamelCase__ ( self : int ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE_: List[str] =MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" ) SCREAMING_SNAKE_CASE_: Optional[int] ="""Tämä on testi""" SCREAMING_SNAKE_CASE_: Union[str, Any] ="""This is a test""" SCREAMING_SNAKE_CASE_: List[Any] =[76, 7, 2047, 2] SCREAMING_SNAKE_CASE_: Any =[69, 12, 11, 940, 2] SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer(lowerCAmelCase ).input_ids self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) SCREAMING_SNAKE_CASE_: str =tokenizer(text_target=lowerCAmelCase ).input_ids self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) SCREAMING_SNAKE_CASE_: str =tokenizer.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase ) self.assertEqual(lowerCAmelCase , lowerCAmelCase )
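# Standalone sketch of the round trip the tests above exercise (requires
# network access to download the checkpoint; the token ids are copied from
# the test expectations):
from transformers import MarianTokenizer

tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
batch = tok(["I am a small frog"], return_tensors="pt")
assert batch.input_ids[0].tolist() == [38, 121, 14, 697, 38848, 0]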
173
0
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa


class CursorInfo(ctypes.Structure):
    # _fields_ is a special attribute of ctypes.Structure
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager that hides the cursor and restores it on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
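# Usage sketch: hide the terminal cursor for the duration of a console update;
# the `finally` clause above guarantees it is shown again even on error.
import time

with hide():
    time.sleep(0.5)  # cursor hidden here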
360
"""simple docstring""" class lowerCAmelCase : '''simple docstring''' def __init__( self , lowerCAmelCase__ ) -> None: SCREAMING_SNAKE_CASE = size SCREAMING_SNAKE_CASE = [0] * size SCREAMING_SNAKE_CASE = [0] * size @staticmethod def __A ( lowerCAmelCase__ ) -> int: return index | (index + 1) @staticmethod def __A ( lowerCAmelCase__ ) -> int: return (index & (index + 1)) - 1 def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None: SCREAMING_SNAKE_CASE = value while index < self.size: SCREAMING_SNAKE_CASE = self.get_prev(lowerCAmelCase__ ) + 1 if current_left_border == index: SCREAMING_SNAKE_CASE = value else: SCREAMING_SNAKE_CASE = max(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) SCREAMING_SNAKE_CASE = self.get_next(lowerCAmelCase__ ) def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: right -= 1 # Because of right is exclusive SCREAMING_SNAKE_CASE = 0 while left <= right: SCREAMING_SNAKE_CASE = self.get_prev(lowerCAmelCase__ ) if left <= current_left: SCREAMING_SNAKE_CASE = max(lowerCAmelCase__ , self.tree[right] ) SCREAMING_SNAKE_CASE = current_left else: SCREAMING_SNAKE_CASE = max(lowerCAmelCase__ , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
38
0
'''simple docstring''' import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class A__ : """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str]=1_3 , lowerCAmelCase__ : int=7 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : Dict=9_9 , lowerCAmelCase__ : Union[str, Any]=2_4 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : List[str]=6 , lowerCAmelCase__ : Any=3_7 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : List[str]=0.1 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Union[str, Any]=5_1_2 , lowerCAmelCase__ : List[str]=1_6 , lowerCAmelCase__ : Any=2 , lowerCAmelCase__ : Any=0.02 , lowerCAmelCase__ : List[Any]=3 , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : str=1_0_0_0 , ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : List[str] = parent _UpperCAmelCase : Optional[Any] = batch_size _UpperCAmelCase : Optional[Any] = seq_length _UpperCAmelCase : List[Any] = is_training _UpperCAmelCase : Optional[int] = use_input_mask _UpperCAmelCase : Optional[Any] = use_token_type_ids _UpperCAmelCase : int = use_labels _UpperCAmelCase : List[Any] = vocab_size _UpperCAmelCase : List[str] = hidden_size _UpperCAmelCase : List[Any] = num_hidden_layers _UpperCAmelCase : List[str] = num_attention_heads _UpperCAmelCase : Tuple = intermediate_size _UpperCAmelCase : Tuple = hidden_act _UpperCAmelCase : int = hidden_dropout_prob _UpperCAmelCase : str = attention_probs_dropout_prob _UpperCAmelCase : List[Any] = max_position_embeddings _UpperCAmelCase : Union[str, Any] = type_vocab_size _UpperCAmelCase : List[str] = type_sequence_label_size _UpperCAmelCase : Any = initializer_range _UpperCAmelCase : Optional[Any] = num_labels _UpperCAmelCase : Tuple = scope _UpperCAmelCase : Optional[int] = range_bbox def _lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" _UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _UpperCAmelCase : Optional[int] = bbox[i, j, 3] _UpperCAmelCase : Optional[int] = bbox[i, j, 1] _UpperCAmelCase : str = t if bbox[i, j, 2] < bbox[i, j, 0]: _UpperCAmelCase : List[str] = bbox[i, j, 2] _UpperCAmelCase : Optional[int] = bbox[i, j, 0] _UpperCAmelCase : List[str] = t _UpperCAmelCase : Tuple = None if self.use_input_mask: _UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) _UpperCAmelCase : Union[str, Any] = None if self.use_token_type_ids: _UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase : List[str] = None _UpperCAmelCase : List[str] = None if self.use_labels: 
_UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase : Any = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def _lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int , ) -> List[str]: """simple docstring""" _UpperCAmelCase : Tuple = LiltModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() _UpperCAmelCase : List[Any] = model(lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ ) _UpperCAmelCase : List[Any] = model(lowercase_ , bbox=lowercase_ , token_type_ids=lowercase_ ) _UpperCAmelCase : int = model(lowercase_ , bbox=lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _lowerCAmelCase ( self : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int , ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : Optional[Any] = self.num_labels _UpperCAmelCase : Optional[Any] = LiltForTokenClassification(config=lowercase_ ) model.to(lowercase_ ) model.eval() _UpperCAmelCase : Tuple = model( lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Union[str, Any] = LiltForQuestionAnswering(config=lowercase_ ) model.to(lowercase_ ) model.eval() _UpperCAmelCase : Optional[int] = model( lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowerCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" _UpperCAmelCase : str = self.prepare_config_and_inputs() ( _UpperCAmelCase ) : List[str] = config_and_inputs _UpperCAmelCase : str = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': 
input_mask, } return config, inputs_dict @require_torch class A__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Dict = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) UpperCamelCase_ : int = ( { '''feature-extraction''': LiltModel, '''question-answering''': LiltForQuestionAnswering, '''text-classification''': LiltForSequenceClassification, '''token-classification''': LiltForTokenClassification, '''zero-shot''': LiltForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase_ : str = False UpperCamelCase_ : Optional[Any] = False def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : str ) -> Tuple: """simple docstring""" return True def _lowerCAmelCase ( self : Tuple ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Dict = LiltModelTester(self ) _UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7 ) def _lowerCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def _lowerCAmelCase ( self : List[str] ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def _lowerCAmelCase ( self : Tuple ) -> Tuple: """simple docstring""" _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCAmelCase : Dict = type self.model_tester.create_and_check_model(*lowercase_ ) def _lowerCAmelCase ( self : str ) -> List[str]: """simple docstring""" _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase_ ) def _lowerCAmelCase ( self : Tuple ) -> Any: """simple docstring""" _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase_ ) @slow def _lowerCAmelCase ( self : Optional[int] ) -> Any: """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : Optional[int] = LiltModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) @require_torch @slow class A__ ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" _UpperCAmelCase : List[str] = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(lowercase_ ) _UpperCAmelCase : str = torch.tensor([[1, 2]] , device=lowercase_ ) _UpperCAmelCase : Optional[int] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase_ ) # forward pass with torch.no_grad(): _UpperCAmelCase : Dict = model(input_ids=lowercase_ , bbox=lowercase_ ) _UpperCAmelCase : str = torch.Size([1, 2, 7_6_8] ) _UpperCAmelCase : Dict = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=lowercase_ , ) self.assertTrue(outputs.last_hidden_state.shape , lowercase_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase_ , atol=1e-3 ) )
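# The loop above repairs illegal boxes by swapping coordinates so that
# x1 <= x2 and y1 <= y2; a minimal standalone version of that fix:
def sanitize_bbox(box):
    x1, y1, x2, y2 = box
    if y2 < y1:
        y1, y2 = y2, y1
    if x2 < x1:
        x1, x2 = x2, x1
    return [x1, y1, x2, y2]

assert sanitize_bbox([5, 9, 2, 3]) == [2, 3, 5, 9]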
145
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL UpperCAmelCase_ : int = logging.get_logger(__name__) def _A (__a ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(__a , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__a , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__a ): return [[videos]] raise ValueError(f'Could not make batched video from {videos}' ) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["pixel_values"] def __init__( self : Dict , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , **lowercase_ : Dict , ): '''simple docstring''' super().__init__(**lowercase_) SCREAMING_SNAKE_CASE_ : str = size if size is not None else {'''shortest_edge''': 256} SCREAMING_SNAKE_CASE_ : Optional[int] = get_size_dict(lowercase_ , default_to_square=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(lowercase_ , param_name='''crop_size''') SCREAMING_SNAKE_CASE_ : Optional[int] = do_resize SCREAMING_SNAKE_CASE_ : List[Any] = size SCREAMING_SNAKE_CASE_ : Tuple = do_center_crop SCREAMING_SNAKE_CASE_ : Dict = crop_size SCREAMING_SNAKE_CASE_ : List[Any] = resample SCREAMING_SNAKE_CASE_ : List[str] = do_rescale SCREAMING_SNAKE_CASE_ : List[str] = rescale_factor SCREAMING_SNAKE_CASE_ : List[Any] = offset SCREAMING_SNAKE_CASE_ : List[Any] = do_normalize SCREAMING_SNAKE_CASE_ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN SCREAMING_SNAKE_CASE_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Any , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_) if "shortest_edge" in size: SCREAMING_SNAKE_CASE_ : List[Any] = get_resize_output_image_size(lowercase_ , size['''shortest_edge'''] , default_to_square=lowercase_) elif "height" in size and "width" in size: SCREAMING_SNAKE_CASE_ : Union[str, Any] = (size['''height'''], size['''width''']) else: raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}') return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = get_size_dict(lowercase_) if "height" not in size or "width" not in size: raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}') return center_crop(lowercase_ , size=(size['''height'''], size['''width''']) , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : bool = True , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[int] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = image.astype(np.floataa) if offset: SCREAMING_SNAKE_CASE_ : Tuple = image - (scale / 2) return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[str] , ): '''simple docstring''' return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ): '''simple docstring''' if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''') if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''') if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''') if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''') if offset and not do_rescale: raise ValueError('''For offset, do_rescale must also be set to True.''') # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE_ : List[str] = to_numpy_array(lowercase_) if do_resize: SCREAMING_SNAKE_CASE_ : List[Any] = self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) if do_center_crop: SCREAMING_SNAKE_CASE_ : Dict = self.center_crop(lowercase_ , size=lowercase_) if do_rescale: SCREAMING_SNAKE_CASE_ : int = self.rescale(image=lowercase_ , scale=lowercase_ , offset=lowercase_) if do_normalize: SCREAMING_SNAKE_CASE_ : Dict = self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = to_channel_dimension_format(lowercase_ , lowercase_) return image def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : Optional[Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE_ : int = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE_ : Dict = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE_ : Dict = offset if offset is not None else self.offset SCREAMING_SNAKE_CASE_ : str = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE_ : Dict = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE_ : List[str] = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else self.size SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_) SCREAMING_SNAKE_CASE_ : Any = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(lowercase_ , param_name='''crop_size''') if not valid_images(lowercase_): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''') SCREAMING_SNAKE_CASE_ : Tuple = make_batched(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = [ [ self._preprocess_image( image=lowercase_ , do_resize=lowercase_ , size=lowercase_ , resample=lowercase_ , do_center_crop=lowercase_ , crop_size=lowercase_ , do_rescale=lowercase_ , rescale_factor=lowercase_ , offset=lowercase_ , do_normalize=lowercase_ , image_mean=lowercase_ , image_std=lowercase_ , data_format=lowercase_ , ) for img in video ] for video in videos ] SCREAMING_SNAKE_CASE_ : int = {'''pixel_values''': videos} return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
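# The helper `_A` at the top of this file (originally `make_batched`)
# normalizes its input to a list of videos, each a list of frames:
frame = np.zeros((16, 16, 3), dtype=np.uint8)
video = [frame, frame]

assert _A([video])[0] is video   # already batched: returned unchanged
assert len(_A(video)) == 1       # single video: wrapped once
assert len(_A(frame)[0]) == 1    # single frame: wrapped twice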
"""Integration test for the Flax mT5 model."""

import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of n in non-decreasing order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
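A few worked examples of the factorization above; the expected outputs are easy to verify by hand.

# 360 = 2^3 * 3^2 * 5, factors are emitted smallest first
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
# a prime is its own (only) factor
assert prime_factors(97) == [97]
# 1 has no prime factors
assert prime_factors(1) == []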
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _snake_case ( lowercase_ ): lowerCAmelCase_ : int = (DPMSolverSinglestepScheduler,) lowerCAmelCase_ : Optional[int] = (("num_inference_steps", 25),) def lowerCAmelCase__ ( self , **a__ ) -> int: '''simple docstring''' snake_case_ = { "num_train_timesteps": 1_000, "beta_start": 0.0_0_0_1, "beta_end": 0.0_2, "beta_schedule": "linear", "solver_order": 2, "prediction_type": "epsilon", "thresholding": False, "sample_max_value": 1.0, "algorithm_type": "dpmsolver++", "solver_type": "midpoint", "lambda_min_clipped": -float("inf" ), "variance_type": None, } config.update(**a__ ) return config def lowerCAmelCase__ ( self , a__=0 , **a__ ) -> Optional[int]: '''simple docstring''' snake_case_ = dict(self.forward_default_kwargs ) snake_case_ = kwargs.pop("num_inference_steps" , a__ ) snake_case_ = self.dummy_sample snake_case_ = 0.1 * sample snake_case_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: snake_case_ = self.get_scheduler_config(**a__ ) snake_case_ = scheduler_class(**a__ ) scheduler.set_timesteps(a__ ) # copy over dummy past residuals snake_case_ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a__ ) snake_case_ = scheduler_class.from_pretrained(a__ ) new_scheduler.set_timesteps(a__ ) # copy over dummy past residuals snake_case_ = dummy_past_residuals[: new_scheduler.config.solver_order] snake_case_ , snake_case_ = sample, sample for t in range(a__ , time_step + scheduler.config.solver_order + 1 ): snake_case_ = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample snake_case_ = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowerCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' pass def lowerCAmelCase__ ( self , a__=0 , **a__ ) -> Any: '''simple docstring''' snake_case_ = dict(self.forward_default_kwargs ) snake_case_ = kwargs.pop("num_inference_steps" , a__ ) snake_case_ = self.dummy_sample snake_case_ = 0.1 * sample snake_case_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: snake_case_ = self.get_scheduler_config() snake_case_ = scheduler_class(**a__ ) scheduler.set_timesteps(a__ ) # copy over dummy past residuals (must be after setting timesteps) snake_case_ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a__ ) snake_case_ = scheduler_class.from_pretrained(a__ ) # copy over dummy past residuals new_scheduler.set_timesteps(a__ ) # copy over dummy past residual (must be after setting timesteps) snake_case_ = dummy_past_residuals[: new_scheduler.config.solver_order] snake_case_ = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample snake_case_ = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowerCAmelCase__ ( self , a__=None , **a__ ) -> List[str]: '''simple docstring''' if scheduler is None: snake_case_ = self.scheduler_classes[0] snake_case_ = self.get_scheduler_config(**a__ ) snake_case_ = scheduler_class(**a__ ) snake_case_ = self.scheduler_classes[0] 
snake_case_ = self.get_scheduler_config(**a__ ) snake_case_ = scheduler_class(**a__ ) snake_case_ = 10 snake_case_ = self.dummy_model() snake_case_ = self.dummy_sample_deter scheduler.set_timesteps(a__ ) for i, t in enumerate(scheduler.timesteps ): snake_case_ = model(a__ , a__ ) snake_case_ = scheduler.step(a__ , a__ , a__ ).prev_sample return sample def lowerCAmelCase__ ( self ) -> Any: '''simple docstring''' snake_case_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) snake_case_ = 50 snake_case_ = self.dummy_model() snake_case_ = self.dummy_sample_deter scheduler.set_timesteps(a__ ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): snake_case_ = model(a__ , a__ ) snake_case_ = scheduler.step(a__ , a__ , a__ ).prev_sample snake_case_ = torch.mean(torch.abs(a__ ) ) assert abs(result_mean.item() - 0.2_5_7_4 ) < 1e-3 def lowerCAmelCase__ ( self ) -> List[str]: '''simple docstring''' for timesteps in [25, 50, 100, 999, 1_000]: self.check_over_configs(num_train_timesteps=a__ ) def lowerCAmelCase__ ( self ) -> List[str]: '''simple docstring''' snake_case_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) snake_case_ = self.full_loop(scheduler=a__ ) snake_case_ = torch.mean(torch.abs(a__ ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 snake_case_ = DEISMultistepScheduler.from_config(scheduler.config ) snake_case_ = DPMSolverMultistepScheduler.from_config(scheduler.config ) snake_case_ = UniPCMultistepScheduler.from_config(scheduler.config ) snake_case_ = DPMSolverSinglestepScheduler.from_config(scheduler.config ) snake_case_ = self.full_loop(scheduler=a__ ) snake_case_ = torch.mean(torch.abs(a__ ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowerCAmelCase__ ( self ) -> Dict: '''simple docstring''' self.check_over_configs(thresholding=a__ ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=a__ , prediction_type=a__ , sample_max_value=a__ , algorithm_type="dpmsolver++" , solver_order=a__ , solver_type=a__ , ) def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=a__ ) def lowerCAmelCase__ ( self ) -> Any: '''simple docstring''' for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=a__ , solver_type=a__ , prediction_type=a__ , algorithm_type=a__ , ) snake_case_ = self.full_loop( solver_order=a__ , solver_type=a__ , prediction_type=a__ , algorithm_type=a__ , ) assert not torch.isnan(a__ ).any(), "Samples have nan numbers" def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' self.check_over_configs(lower_order_final=a__ ) self.check_over_configs(lower_order_final=a__ ) def lowerCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' self.check_over_configs(lambda_min_clipped=-float("inf" ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def lowerCAmelCase__ ( self ) -> List[str]: '''simple docstring''' self.check_over_configs(variance_type=a__ ) self.check_over_configs(variance_type="learned_range" ) def lowerCAmelCase__ ( self ) -> Tuple: '''simple docstring''' for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]: self.check_over_forward(num_inference_steps=a__ , time_step=0 ) def lowerCAmelCase__ ( self ) -> 
Tuple: '''simple docstring''' snake_case_ = self.full_loop() snake_case_ = torch.mean(torch.abs(a__ ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowerCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' snake_case_ = self.full_loop(use_karras_sigmas=a__ ) snake_case_ = torch.mean(torch.abs(a__ ) ) assert abs(result_mean.item() - 0.2_2_4_8 ) < 1e-3 def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' snake_case_ = self.full_loop(prediction_type="v_prediction" ) snake_case_ = torch.mean(torch.abs(a__ ) ) assert abs(result_mean.item() - 0.1_4_5_3 ) < 1e-3 def lowerCAmelCase__ ( self ) -> List[str]: '''simple docstring''' snake_case_ = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=a__ ) snake_case_ = torch.mean(torch.abs(a__ ) ) assert abs(result_mean.item() - 0.0_6_4_9 ) < 1e-3 def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' snake_case_ = self.scheduler_classes[0] snake_case_ = self.get_scheduler_config(thresholding=a__ , dynamic_thresholding_ratio=0 ) snake_case_ = scheduler_class(**a__ ) snake_case_ = 10 snake_case_ = self.dummy_model() snake_case_ = self.dummy_sample_deter.half() scheduler.set_timesteps(a__ ) for i, t in enumerate(scheduler.timesteps ): snake_case_ = model(a__ , a__ ) snake_case_ = scheduler.step(a__ , a__ , a__ ).prev_sample assert sample.dtype == torch.floataa
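Outside the test harness, the usual way to exercise DPMSolverSinglestepScheduler is to swap it into a diffusers pipeline via from_config, as the conversion tests above do between scheduler classes. A hedged sketch; the checkpoint id and prompt are illustrative, not taken from the tests.

import torch
from diffusers import DPMSolverSinglestepScheduler, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16  # illustrative checkpoint
)
# reuse the existing scheduler's config when swapping in the singlestep solver
pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")
image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]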
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _SCREAMING_SNAKE_CASE : Optional[Any] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : List[Any] = ["XGLMTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : str = ["XGLMTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : List[str] = [ "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XGLMForCausalLM", "XGLMModel", "XGLMPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : Optional[Any] = [ "FlaxXGLMForCausalLM", "FlaxXGLMModel", "FlaxXGLMPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : Union[str, Any] = [ "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXGLMForCausalLM", "TFXGLMModel", "TFXGLMPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure)
import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def UpperCAmelCase ( _lowerCamelCase ): # picklable for multiprocessing return x.sum() def UpperCAmelCase ( _lowerCamelCase ): # picklable for multiprocessing return i + 1 @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = 42 a__ = 42 class lowerCamelCase_ ( _A ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[Any]: A : List[str] = {} A : int = [] A : Union[str, Any] = 1 A : Optional[int] = [1, 2] A : int = {"a": 1, "b": 2} A : Any = {"a": [1, 2], "b": [3, 4]} A : List[str] = {"a": {"1": 1}, "b": 2} A : List[str] = {"a": 1, "b": 2, "c": 3, "d": 4} A : Optional[Any] = {} A : Dict = [] A : List[Any] = 2 A : List[Any] = [2, 3] A : str = {"a": 2, "b": 3} A : List[str] = {"a": [2, 3], "b": [4, 5]} A : int = {"a": {"1": 2}, "b": 3} A : str = {"a": 2, "b": 3, "c": 4, "d": 5} self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ) A : Any = 2 self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase ) , __lowerCamelCase ) A : List[str] = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )} A : Optional[Any] = {"a": 2, "b": 0, "c": 2} A : Any = { "a": np.eye(2 ).astype(__lowerCamelCase ), "b": np.zeros(3 ).astype(__lowerCamelCase ), "c": np.ones(2 ).astype(__lowerCamelCase ), } self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase , map_numpy=__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(__lowerCamelCase , __lowerCamelCase , map_numpy=__lowerCamelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase , map_numpy=__lowerCamelCase , num_proc=__lowerCamelCase ) , __lowerCamelCase 
) self.assertEqual( {k: v.tolist() for k, v in map_nested(__lowerCamelCase , __lowerCamelCase , map_numpy=__lowerCamelCase , num_proc=__lowerCamelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(__lowerCamelCase ): # can't pickle a local lambda map_nested(lambda __lowerCamelCase : x + 1 , __lowerCamelCase , num_proc=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any: A : int = {"a": 1, "b": 2} A : Any = {"a": 3, "b": 4} A : List[Any] = {"a": 5, "b": 6} A : Any = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) ) , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict: class lowerCamelCase_ : '''simple docstring''' a__ = "bar" A : Tuple = Foo() self.assertEqual(foo.my_attr , "bar" ) with temporary_assignment(__lowerCamelCase , "my_attr" , "BAR" ): self.assertEqual(foo.my_attr , "BAR" ) self.assertEqual(foo.my_attr , "bar" ) @pytest.mark.parametrize( "iterable_length, num_proc, expected_num_proc" , [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] , ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch( "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool: A : Any = {f"""{i}""": i for i in range(_lowerCamelCase )} A : List[Any] = map_nested(lambda _lowerCamelCase : x + 10 , _lowerCamelCase , num_proc=_lowerCamelCase , parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class lowerCamelCase_ ( _A ): '''simple docstring''' @require_tf def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]: import tensorflow as tf from tensorflow.keras import layers A : Any = layers.Dense(2 ) def gen_random_output(): A : Optional[Any] = tf.random.uniform((1, 3) ) return model(__lowerCamelCase ).numpy() with temp_seed(42 , set_tensorflow=__lowerCamelCase ): A : Optional[Any] = gen_random_output() with temp_seed(42 , set_tensorflow=__lowerCamelCase ): A : Tuple = gen_random_output() A : int = gen_random_output() np.testing.assert_equal(__lowerCamelCase , __lowerCamelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Union[str, Any]: import torch def gen_random_output(): A : Dict = torch.nn.Linear(3 , 2 ) A : str = torch.rand(1 , 3 ) return model(__lowerCamelCase ).detach().numpy() with temp_seed(42 , set_pytorch=__lowerCamelCase ): A : str = gen_random_output() with temp_seed(42 , set_pytorch=__lowerCamelCase ): A : int = gen_random_output() A : List[str] = gen_random_output() np.testing.assert_equal(__lowerCamelCase , __lowerCamelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]: def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): A : Any = gen_random_output() with temp_seed(42 ): A : Tuple = gen_random_output() A : Optional[int] = gen_random_output() np.testing.assert_equal(__lowerCamelCase , __lowerCamelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize("input_data" , [{}] ) 
def UpperCAmelCase ( _lowerCamelCase ): A : int = NestedDataStructure(_lowerCamelCase ).data assert output_data == input_data @pytest.mark.parametrize( "data, expected_output" , [ ({}, []), ([], []), ("foo", ["foo"]), (["foo", "bar"], ["foo", "bar"]), ([["foo", "bar"]], ["foo", "bar"]), ([[["foo"], ["bar"]]], ["foo", "bar"]), ([[["foo"], "bar"]], ["foo", "bar"]), ({"a": 1, "b": 2}, [1, 2]), ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]), ({"a": {"1": 1}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": [2]}, [1, 2]), ] , ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : str = NestedDataStructure(_lowerCamelCase ).flatten() assert output == expected_output def UpperCAmelCase ( ): A : Dict = A(x=1 , y="foobar" ) A : Any = {"x": 1, "y": "foobar"} assert asdict(_lowerCamelCase ) == expected_output A : Optional[Any] = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]} A : Optional[int] = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]} assert asdict(_lowerCamelCase ) == expected_output with pytest.raises(_lowerCamelCase ): asdict([1, A(x=10 , y="foo" )] ) def UpperCAmelCase ( _lowerCamelCase ): return text.split() def UpperCAmelCase ( _lowerCamelCase ): yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def UpperCAmelCase ( ): with Pool(2 ) as pool: A : Optional[Any] = list(iflatmap_unordered(_lowerCamelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(_lowerCamelCase ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: A : int = list(iflatmap_unordered(_lowerCamelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(_lowerCamelCase ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: A : Any = [] for yield_time, content in iflatmap_unordered( _lowerCamelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(_lowerCamelCase ) assert out.count("a" ) == 2 assert out.count("b" ) == 2 assert len(_lowerCamelCase ) == 4
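For reference, a minimal single-process illustration of the map_nested behaviour these tests exercise: the function is applied to every leaf while the container structure is preserved.

from datasets.utils.py_utils import map_nested

nested = {"a": [1, 2], "b": {"c": 3}}
print(map_nested(lambda x: x + 1, nested))  # {'a': [2, 3], 'b': {'c': 4}}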
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : Any = set() A : int = [] def parse_line(_lowerCamelCase ): for line in fp: if isinstance(_lowerCamelCase , _lowerCamelCase ): A : Any = line.decode("UTF-8" ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(" " ): # process a single warning and move it to `selected_warnings`. if len(_lowerCamelCase ) > 0: A : Union[str, Any] = "\n".join(_lowerCamelCase ) # Only keep the warnings specified in `targets` if any(f""": {x}: """ in warning for x in targets ): selected_warnings.add(_lowerCamelCase ) buffer.clear() continue else: A : Union[str, Any] = line.strip() buffer.append(_lowerCamelCase ) if from_gh: for filename in os.listdir(_lowerCamelCase ): A : Tuple = os.path.join(_lowerCamelCase , _lowerCamelCase ) if not os.path.isdir(_lowerCamelCase ): # read the file if filename != "warnings.txt": continue with open(_lowerCamelCase ) as fp: parse_line(_lowerCamelCase ) else: try: with zipfile.ZipFile(_lowerCamelCase ) as z: for filename in z.namelist(): if not os.path.isdir(_lowerCamelCase ): # read the file if filename != "warnings.txt": continue with z.open(_lowerCamelCase ) as fp: parse_line(_lowerCamelCase ) except Exception: logger.warning( f"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" ) return selected_warnings def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : Tuple = set() A : Union[str, Any] = [os.path.join(_lowerCamelCase , _lowerCamelCase ) for p in os.listdir(_lowerCamelCase ) if (p.endswith(".zip" ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(_lowerCamelCase , _lowerCamelCase ) ) return selected_warnings if __name__ == "__main__": def UpperCAmelCase ( _lowerCamelCase ): return values.split("," ) __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) __SCREAMING_SNAKE_CASE = parser.parse_args() __SCREAMING_SNAKE_CASE = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links __SCREAMING_SNAKE_CASE = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("""=""" * 80) 
download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts __SCREAMING_SNAKE_CASE = extract_warnings(args.output_dir, args.targets) __SCREAMING_SNAKE_CASE = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
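The helpers above can also be driven programmatically. A hedged sketch: the directory is a placeholder, and note that the functions read the module-level from_gh flag, so it must be bound before calling them outside the __main__ block.

from_gh = False  # the helpers consult this module-level flag
warnings = extract_warnings("artifacts", ["DeprecationWarning", "UserWarning", "FutureWarning"])
for warning in sorted(warnings):
    print(warning)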
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices a : Union[str, Any] = logging.get_logger(__name__) a : str = { 'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json', } class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ): lowerCamelCase : List[Any] ="""convnextv2""" def __init__( self , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=4 , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=0.0 , lowerCAmelCase__=224 , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> List[Any]: super().__init__(**_lowerCamelCase ) a : str = num_channels a : int = patch_size a : Union[str, Any] = num_stages a : Any = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes a : Any = [3, 3, 9, 3] if depths is None else depths a : Optional[int] = hidden_act a : Tuple = initializer_range a : int = layer_norm_eps a : List[Any] = drop_path_rate a : Union[str, Any] = image_size a : Any = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] a : Tuple = get_aligned_output_features_output_indices( out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class lowercase ( unittest.TestCase): def a_ ( self : List[str] ): """simple docstring""" A_ : Tuple = tempfile.mkdtemp() # fmt: off A_ : List[Any] = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on A_ : Tuple = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) ) A_ : Optional[int] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] A_ : Tuple = {'''unk_token''': '''<unk>'''} A_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) A_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_lowerCamelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_lowerCamelCase ) ) A_ : str = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48145466, 0.4578275, 0.40821073], '''image_std''': [0.26862954, 0.26130258, 0.27577711], } A_ : str = os.path.join(self.tmpdirname , _lowerCamelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_lowerCamelCase , _lowerCamelCase ) def a_ ( self : Any , **_lowerCamelCase : Dict ): """simple docstring""" return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def a_ ( self : Dict , **_lowerCamelCase : Optional[int] ): """simple docstring""" return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def a_ ( self : List[str] , **_lowerCamelCase : List[Any] ): """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def a_ ( self : int ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def a_ ( self : List[str] ): """simple docstring""" A_ : Dict = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] A_ : Dict = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def a_ ( self : List[str] ): """simple docstring""" A_ : int = self.get_tokenizer() A_ : int = self.get_rust_tokenizer() A_ : Optional[Any] = self.get_image_processor() A_ : Union[str, Any] = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) A_ : List[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase ) A_ : Optional[Any] = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) A_ : Any = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , 
tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase ) self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase ) self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase ) def a_ ( self : str ): """simple docstring""" A_ : Tuple = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ : Tuple = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) A_ : Dict = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 ) A_ : List[Any] = CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowerCamelCase ) def a_ ( self : int ): """simple docstring""" A_ : List[str] = self.get_image_processor() A_ : Union[str, Any] = self.get_tokenizer() A_ : Union[str, Any] = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : Tuple = self.prepare_image_inputs() A_ : Dict = image_processor(_lowerCamelCase , return_tensors='''np''' ) A_ : Optional[int] = processor(images=_lowerCamelCase , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def a_ ( self : str ): """simple docstring""" A_ : Optional[int] = self.get_image_processor() A_ : int = self.get_tokenizer() A_ : int = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : Union[str, Any] = '''lower newer''' A_ : int = processor(text=_lowerCamelCase ) A_ : Any = tokenizer(_lowerCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a_ ( self : str ): """simple docstring""" A_ : str = self.get_image_processor() A_ : List[Any] = self.get_tokenizer() A_ : Tuple = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : Union[str, Any] = '''lower newer''' A_ : Optional[Any] = self.prepare_image_inputs() A_ : Dict = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(_lowerCamelCase ): processor() def a_ ( self : List[Any] ): """simple docstring""" A_ : Optional[int] = self.get_image_processor() A_ : int = self.get_tokenizer() A_ : Any = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : Tuple = self.prepare_image_inputs() A_ : Tuple = self.prepare_image_inputs() A_ : Optional[int] = processor(images=_lowerCamelCase , visual_prompt=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] ) # test if it raises when no input is 
passed with pytest.raises(_lowerCamelCase ): processor() def a_ ( self : List[Any] ): """simple docstring""" A_ : Optional[int] = self.get_image_processor() A_ : Union[str, Any] = self.get_tokenizer() A_ : Optional[Any] = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ : List[str] = processor.batch_decode(_lowerCamelCase ) A_ : str = tokenizer.batch_decode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
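A hedged sketch of CLIPSegProcessor outside the test fixtures; the checkpoint name is illustrative.

from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")  # illustrative checkpoint
image = Image.new("RGB", (352, 352))
inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']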
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore lowerCAmelCase__ = ''' Human: <<task>> Assistant: ''' lowerCAmelCase__ = '''huggingface-tools/default-prompts''' lowerCAmelCase__ = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''} def snake_case_ ( A_ : Optional[Any], A_ : List[str], A_ : int="run" ): '''simple docstring''' if prompt_or_repo_id is None: _lowerCamelCase : Dict = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search('''\\s''', A_ ) is not None: return prompt_or_repo_id _lowerCamelCase : Union[str, Any] = cached_file( A_, PROMPT_FILES[mode], repo_type='''dataset''', user_agent={'''agent''': agent_name} ) with open(A_, '''r''', encoding='''utf-8''' ) as f: return f.read()
"""simple docstring""" import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __snake_case ( _lowercase): def __init__( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any]=1_3 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : int=False , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Dict=9_9 , __lowerCAmelCase : str=0 , __lowerCAmelCase : Optional[Any]=3_2 , __lowerCAmelCase : Tuple=5 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Optional[int]=5_1_2 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Any=0.02 , __lowerCAmelCase : str=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Optional[int]="last" , __lowerCAmelCase : str=None , __lowerCAmelCase : int=None , ): """simple docstring""" _lowerCamelCase : Dict = parent _lowerCamelCase : List[str] = batch_size _lowerCamelCase : Dict = seq_length _lowerCamelCase : List[Any] = is_training _lowerCamelCase : Dict = use_input_lengths _lowerCamelCase : Tuple = use_token_type_ids _lowerCamelCase : Any = use_labels _lowerCamelCase : Optional[Any] = gelu_activation _lowerCamelCase : Optional[Any] = sinusoidal_embeddings _lowerCamelCase : Dict = causal _lowerCamelCase : Dict = asm _lowerCamelCase : str = n_langs _lowerCamelCase : str = vocab_size _lowerCamelCase : Optional[int] = n_special _lowerCamelCase : Dict = hidden_size _lowerCamelCase : int = num_hidden_layers _lowerCamelCase : str = num_attention_heads _lowerCamelCase : Dict = hidden_dropout_prob _lowerCamelCase : int = attention_probs_dropout_prob _lowerCamelCase : Any = max_position_embeddings _lowerCamelCase : Any = type_vocab_size _lowerCamelCase : Optional[int] = type_sequence_label_size _lowerCamelCase : List[str] = initializer_range _lowerCamelCase : List[Any] = num_labels _lowerCamelCase : Dict = num_choices _lowerCamelCase : str = summary_type _lowerCamelCase : List[str] = use_proj _lowerCamelCase : int = scope def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCamelCase : Optional[int] = None if self.use_input_lengths: _lowerCamelCase : int = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length _lowerCamelCase : Union[str, Any] = None if self.use_token_type_ids: _lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , 
self.n_langs ) _lowerCamelCase : Union[str, Any] = None _lowerCamelCase : List[str] = None _lowerCamelCase : Optional[Any] = None if self.use_labels: _lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCamelCase : str = ids_tensor([self.batch_size] , 2 ).float() _lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) _lowerCamelCase : Tuple = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , ): """simple docstring""" _lowerCamelCase : Optional[Any] = FlaubertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , lengths=__lowerCAmelCase , langs=__lowerCAmelCase ) _lowerCamelCase : str = model(__lowerCAmelCase , langs=__lowerCAmelCase ) _lowerCamelCase : List[str] = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , ): """simple docstring""" _lowerCamelCase : Tuple = FlaubertWithLMHeadModel(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : str = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , ): """simple docstring""" _lowerCamelCase : Union[str, Any] = FlaubertForQuestionAnsweringSimple(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : List[str] = model(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) 
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , ): """simple docstring""" _lowerCamelCase : str = FlaubertForQuestionAnswering(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = model( __lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , p_mask=__lowerCAmelCase , ) _lowerCamelCase : List[str] = model( __lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , ) ((_lowerCamelCase) , ) : str = result_with_labels.to_tuple() _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase ) ((_lowerCamelCase) , ) : Union[str, Any] = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : str , ): """simple docstring""" _lowerCamelCase : Dict = FlaubertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : str = model(__lowerCAmelCase ) _lowerCamelCase : Tuple = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , ): """simple docstring""" _lowerCamelCase : Any = self.num_labels _lowerCamelCase : List[str] = FlaubertForTokenClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , 
__lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , ): """simple docstring""" _lowerCamelCase : List[str] = self.num_choices _lowerCamelCase : Any = FlaubertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCamelCase : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCamelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCamelCase : int = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Any = self.prepare_config_and_inputs() ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) : Optional[Any] = config_and_inputs _lowerCamelCase : int = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __snake_case ( _lowercase , _lowercase , unittest.TestCase): snake_case__ : List[str] = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) snake_case__ : List[Any] = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int]=False ): """simple docstring""" _lowerCamelCase : Dict = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": _lowerCamelCase : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) _lowerCamelCase : Any = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : int = FlaubertModelTester(self ) _lowerCamelCase : str = ConfigTester(self , config_class=__lowerCAmelCase , emb_dim=3_7 ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*__lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Dict = FlaubertModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return _lowerCamelCase : Any = True _lowerCamelCase : int = model_class(config=__lowerCAmelCase ) _lowerCamelCase : List[str] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : int = torch.jit.trace( __lowerCAmelCase , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''traced_model.pt''' ) ) _lowerCamelCase : Union[str, Any] = torch.jit.load(os.path.join(__lowerCAmelCase , '''traced_model.pt''' ) , map_location=__lowerCAmelCase ) loaded(inputs_dict['''input_ids'''].to(__lowerCAmelCase ) , inputs_dict['''attention_mask'''].to(__lowerCAmelCase ) ) @require_torch class __snake_case ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' ) _lowerCamelCase : Any = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) with torch.no_grad(): _lowerCamelCase : Any = model(__lowerCAmelCase )[0] _lowerCamelCase : Optional[Any] = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , __lowerCAmelCase ) _lowerCamelCase : Optional[int] = torch.tensor( [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 ) )
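For completeness, the plain feature-extraction path that the integration test above checks, written as a hedged standalone sketch.

import torch
from transformers import FlaubertModel, FlaubertTokenizer

tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
inputs = tokenizer("Le chat mange une pomme.", return_tensors="pt")  # illustrative sentence
with torch.no_grad():
    last_hidden = model(**inputs).last_hidden_state
print(last_hidden.shape)  # torch.Size([1, seq_len, 768])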
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)

        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
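# Usage sketch (added, not part of the original file): wiring the callbacks
# above into a PyTorch Lightning trainer. `MyRagModule` is a hypothetical
# LightningModule that exposes the `hparams.output_dir`, `metrics` and
# `metrics_save_path` attributes the callbacks expect.
#
#     import pytorch_lightning as pl
#
#     checkpoint_callback = get_checkpoint_callback(output_dir="outputs", metric="rouge2")
#     early_stopping = get_early_stopping_callback(metric="rouge2", patience=3)
#     trainer = pl.Trainer(
#         max_epochs=3,
#         callbacks=[Seq2SeqLoggingCallback(), checkpoint_callback, early_stopping],
#     )
#     trainer.fit(MyRagModule())  # hypothetical module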
124
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : str = { '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''', } class _SCREAMING_SNAKE_CASE ( _a ): snake_case__ : Optional[int] = """layoutlmv3""" def __init__( self : List[Any] , __lowerCamelCase : Optional[Any]=50_265 , __lowerCamelCase : Dict=768 , __lowerCamelCase : Any=12 , __lowerCamelCase : int=12 , __lowerCamelCase : str=3_072 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[Any]=512 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : Union[str, Any]=1E-5 , __lowerCamelCase : Any=1 , __lowerCamelCase : Optional[int]=0 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : Dict=1_024 , __lowerCamelCase : List[Any]=128 , __lowerCamelCase : str=128 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : str=32 , __lowerCamelCase : List[Any]=128 , __lowerCamelCase : str=64 , __lowerCamelCase : List[str]=256 , __lowerCamelCase : Dict=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Tuple=224 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Dict=16 , __lowerCamelCase : Union[str, Any]=None , **__lowerCamelCase : Optional[Any] , ): super().__init__( vocab_size=__lowerCamelCase , hidden_size=__lowerCamelCase , num_hidden_layers=__lowerCamelCase , num_attention_heads=__lowerCamelCase , intermediate_size=__lowerCamelCase , hidden_act=__lowerCamelCase , hidden_dropout_prob=__lowerCamelCase , attention_probs_dropout_prob=__lowerCamelCase , max_position_embeddings=__lowerCamelCase , type_vocab_size=__lowerCamelCase , initializer_range=__lowerCamelCase , layer_norm_eps=__lowerCamelCase , pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase , ) UpperCamelCase :int = max_ad_position_embeddings UpperCamelCase :Tuple = coordinate_size UpperCamelCase :List[Any] = shape_size UpperCamelCase :Union[str, Any] = has_relative_attention_bias UpperCamelCase :Any = rel_pos_bins UpperCamelCase :Optional[Any] = max_rel_pos UpperCamelCase :str = has_spatial_attention_bias UpperCamelCase :Tuple = rel_ad_pos_bins UpperCamelCase :Optional[int] = max_rel_ad_pos UpperCamelCase :Tuple = text_embed UpperCamelCase :str = visual_embed UpperCamelCase :Optional[Any] = input_size UpperCamelCase :str = num_channels UpperCamelCase :List[Any] = patch_size UpperCamelCase :Optional[Any] = classifier_dropout class _SCREAMING_SNAKE_CASE ( _a ): snake_case__ : int = version.parse("""1.12""" ) @property def _A ( self : Optional[int] ): # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) 
else: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def _A ( self : str ): return 1E-5 @property def _A ( self : Dict ): return 12 def _A ( self : Dict , __lowerCamelCase : "ProcessorMixin" , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional["TensorType"] = None , __lowerCamelCase : int = 3 , __lowerCamelCase : int = 40 , __lowerCamelCase : int = 40 , ): setattr(processor.image_processor , """apply_ocr""" , __lowerCamelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX UpperCamelCase :Optional[Any] = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCamelCase :Optional[int] = processor.tokenizer.num_special_tokens_to_add(__lowerCamelCase ) UpperCamelCase :int = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence UpperCamelCase :Any = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes UpperCamelCase :Optional[Any] = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) UpperCamelCase :List[str] = self._generate_dummy_images(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) UpperCamelCase :Any = dict( processor( __lowerCamelCase , text=__lowerCamelCase , boxes=__lowerCamelCase , return_tensors=__lowerCamelCase , ) ) return inputs
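# Export sketch (added; hedged): the class above mirrors the upstream
# LayoutLMv3 OnnxConfig, so an export with the public transformers.onnx API
# would look roughly like the following. Verify the import paths and the
# `export` signature against your installed transformers version.
#
#     from pathlib import Path
#     from transformers import LayoutLMv3ForSequenceClassification, LayoutLMv3Processor
#     from transformers.models.layoutlmv3.configuration_layoutlmv3 import LayoutLMv3OnnxConfig
#     from transformers.onnx import export
#
#     processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#     model = LayoutLMv3ForSequenceClassification.from_pretrained("microsoft/layoutlmv3-base")
#     onnx_config = LayoutLMv3OnnxConfig(model.config, task="sequence-classification")
#     export(processor, model, onnx_config, onnx_config.default_onnx_opset, Path("layoutlmv3.onnx"))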
38
0
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: Optional[str] = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
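# Usage sketch (added): loading the paired tokenizers from the public
# facebook/rag-token-nq checkpoint (requires network access), then encoding a
# question; `generated_ids` would come from a RAG model's generate() call.
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
#     # answers = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)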
94
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
94
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
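# Effect of the lazy module (added sketch): nothing under `modeling_*` is
# imported until an attribute is first accessed, so the access below is what
# actually loads the configuration module. Parameter values are illustrative.
#
#     from transformers import TimeSeriesTransformerConfig
#     config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48)
#     print(config.model_type)  # "time_series_transformer"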
29
"""
Project Euler problem 63: count the n-digit positive integers
that are also an nth power.
"""


def solution(max_base: int = 10, max_power: int = 22) -> int:
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
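# Quick sanity checks (added): solution(10, 2) counts the nine one-digit
# first powers 1**1 .. 9**1, and the published Project Euler 63 answer for
# the defaults is 49.
assert solution(10, 2) == 9
assert solution() == 49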
104
0
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. `s3://`) from a dataset path, if any."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs) -> bool:
    """Return True if `fs` is anything other than the local filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs, src: str, dst: str) -> None:
    """Move `src` to `dst`, using an efficient local move when possible."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear fsspec's event-loop references so it can be reused after a fork."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
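# Usage sketch (added; these helpers live inside the `datasets` package, so
# the relative imports above only resolve there):
#
#     import fsspec
#
#     assert extract_path_from_uri("s3://my-bucket/data/train.csv") == "my-bucket/data/train.csv"
#     assert extract_path_from_uri("data/train.csv") == "data/train.csv"
#     assert is_remote_filesystem(fsspec.filesystem("file")) is False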
354
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
284
0
"""simple docstring""" def lowercase ( a__ : list[list[int]] , a__ : int , a__ : int , a__ : set ) -> int: _UpperCamelCase , _UpperCamelCase = len(a__ ), len(grid[0] ) if ( min(a__ , a__ ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) _UpperCamelCase = 0 count += depth_first_search(a__ , row + 1 , a__ , a__ ) count += depth_first_search(a__ , row - 1 , a__ , a__ ) count += depth_first_search(a__ , a__ , col + 1 , a__ ) count += depth_first_search(a__ , a__ , col - 1 , a__ ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
256
"""simple docstring""" from typing import List import numpy as np def lowercase ( a__ : dict ) -> int: _UpperCamelCase = {key: len(a__ ) for key, value in gen_kwargs.items() if isinstance(a__ , a__ )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( '''Sharding is ambiguous for this dataset: ''' + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n''' + '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ''' + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.''' ) ) _UpperCamelCase = max(lists_lengths.values() , default=0 ) return max(1 , a__ ) def lowercase ( a__ : int , a__ : int ) -> List[range]: _UpperCamelCase = [] for group_idx in range(a__ ): _UpperCamelCase = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break _UpperCamelCase = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 _UpperCamelCase = range(a__ , start + num_shards_to_add ) shards_indices_per_group.append(a__ ) return shards_indices_per_group def lowercase ( a__ : dict , a__ : int ) -> List[dict]: _UpperCamelCase = _number_of_shards_in_gen_kwargs(a__ ) if num_shards == 1: return [dict(a__ )] else: _UpperCamelCase = _distribute_shards(num_shards=a__ , max_num_jobs=a__ ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(a__ , a__ ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(a__ ) ) ] def lowercase ( a__ : List[dict] ) -> dict: return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , a__ ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def lowercase ( a__ : np.random.Generator , a__ : dict ) -> dict: _UpperCamelCase = {len(a__ ) for value in gen_kwargs.values() if isinstance(a__ , a__ )} _UpperCamelCase = {} for size in list_sizes: _UpperCamelCase = list(range(a__ ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes _UpperCamelCase = dict(a__ ) for key, value in shuffled_kwargs.items(): if isinstance(a__ , a__ ): _UpperCamelCase = [value[i] for i in indices_per_size[len(a__ )]] return shuffled_kwargs
256
1
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum of the first n
    natural numbers and the sum of their squares."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
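# Worked example (added): for n = 10 the sum is 55 and the sum of squares is
# 385, so the difference is 55**2 - 385 = 2640. The published Project Euler 6
# answer for n = 100 is 25164150.
assert solution(10) == 2640
assert solution() == 25164150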
356
'''simple docstring''' from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class UpperCAmelCase : def __init__( self :str , lowercase_ :str , )-> str: A__ = parent A__ = 13 A__ = 7 A__ = True A__ = True A__ = False A__ = True A__ = 99 A__ = 32 A__ = 2 A__ = 4 A__ = 37 A__ = "gelu" A__ = 0.1 A__ = 0.1 A__ = 5_12 A__ = 16 A__ = 2 A__ = 0.0_2 A__ = 3 A__ = 4 A__ = None def UpperCAmelCase_ ( self :Union[str, Any] )-> int: A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length] ) A__ = None A__ = None A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A__ = ids_tensor([self.batch_size] , self.num_choices ) A__ = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ ( self :str , lowercase_ :Optional[int] , lowercase_ :List[str] , lowercase_ :Any , lowercase_ :Union[str, Any] , lowercase_ :Optional[int] , lowercase_ :str )-> List[str]: A__ = TFDistilBertModel(config=lowercase_ ) A__ = {"input_ids": input_ids, "attention_mask": input_mask} A__ = model(lowercase_ ) A__ = [input_ids, input_mask] A__ = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self :List[str] , lowercase_ :str , lowercase_ :Optional[Any] , lowercase_ :Optional[int] , lowercase_ :Optional[int] , lowercase_ :Optional[int] , lowercase_ :Union[str, Any] )-> Optional[int]: A__ = TFDistilBertForMaskedLM(config=lowercase_ ) A__ = {"input_ids": input_ids, "attention_mask": input_mask} A__ = model(lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self :Any , lowercase_ :str , lowercase_ :str , lowercase_ :Optional[int] , lowercase_ :str , lowercase_ :List[Any] , lowercase_ :Union[str, Any] )-> Optional[int]: A__ = TFDistilBertForQuestionAnswering(config=lowercase_ ) A__ = { "input_ids": input_ids, "attention_mask": input_mask, } A__ = model(lowercase_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase_ ( self :Union[str, Any] , lowercase_ :Optional[int] , lowercase_ :Any , lowercase_ :Dict , 
lowercase_ :Tuple , lowercase_ :Optional[Any] , lowercase_ :Optional[int] )-> Any: A__ = self.num_labels A__ = TFDistilBertForSequenceClassification(lowercase_ ) A__ = {"input_ids": input_ids, "attention_mask": input_mask} A__ = model(lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ ( self :str , lowercase_ :Optional[Any] , lowercase_ :List[Any] , lowercase_ :Dict , lowercase_ :Tuple , lowercase_ :int , lowercase_ :Union[str, Any] )-> str: A__ = self.num_choices A__ = TFDistilBertForMultipleChoice(lowercase_ ) A__ = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) ) A__ = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) ) A__ = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, } A__ = model(lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self :str , lowercase_ :Any , lowercase_ :List[str] , lowercase_ :Any , lowercase_ :int , lowercase_ :List[Any] , lowercase_ :Tuple )-> Tuple: A__ = self.num_labels A__ = TFDistilBertForTokenClassification(lowercase_ ) A__ = {"input_ids": input_ids, "attention_mask": input_mask} A__ = model(lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self :Any )-> Union[str, Any]: A__ = self.prepare_config_and_inputs() ((A__), (A__), (A__), (A__), (A__), (A__)) = config_and_inputs A__ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): __lowercase = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) __lowercase = ( { """feature-extraction""": TFDistilBertModel, """fill-mask""": TFDistilBertForMaskedLM, """question-answering""": TFDistilBertForQuestionAnswering, """text-classification""": TFDistilBertForSequenceClassification, """token-classification""": TFDistilBertForTokenClassification, """zero-shot""": TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) __lowercase = False __lowercase = False def UpperCAmelCase_ ( self :Optional[Any] )-> List[Any]: A__ = TFDistilBertModelTester(self ) A__ = ConfigTester(self , config_class=lowercase_ , dim=37 ) def UpperCAmelCase_ ( self :Tuple )-> Tuple: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self :int )-> Tuple: A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*lowercase_ ) def UpperCAmelCase_ ( self :Optional[int] )-> Optional[Any]: A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase_ ) def UpperCAmelCase_ ( self :str )-> str: A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase_ ) def UpperCAmelCase_ ( self :List[str] )-> Dict: A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase_ ) def UpperCAmelCase_ ( self :List[str] )-> Optional[int]: A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase_ ) def UpperCAmelCase_ ( self :str )-> int: A__ = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase_ ) @slow def UpperCAmelCase_ ( self :List[str] )-> Dict: for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): A__ = TFDistilBertModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) @require_tf class UpperCAmelCase ( unittest.TestCase ): @slow def UpperCAmelCase_ ( self :List[Any] )-> Any: A__ = TFDistilBertModel.from_pretrained("distilbert-base-uncased" ) A__ = tf.constant([[0, 1, 2, 3, 4, 5]] ) A__ = model(lowercase_ )[0] A__ = [1, 6, 7_68] self.assertEqual(output.shape , lowercase_ ) A__ = tf.constant( [ [ [0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9], [0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4], [0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1E-4 )
123
0
from math import ceil


def solution(n: int = 1001) -> int:
    """Sum of the numbers on the diagonals of an n x n number spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
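# Worked example (added): a 5x5 number spiral has diagonal values
# 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101.
assert solution(5) == 101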
175
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """Return number + 2 if (number, number + 2) is a twin prime pair, else -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
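# Examples (added; they assume the sibling maths.prime_check module imported
# above is available):
assert twin_prime(5) == 7  # (5, 7) is a twin prime pair
assert twin_prime(4) == -1  # 4 is not prime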
175
1
"""simple docstring""" import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate lowerCAmelCase_ = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow('', '|', '|'), datarow=DataRow('', '|', '|'), padding=1, with_header_hide=None, ) lowerCAmelCase_ = [] lowerCAmelCase_ = [] lowerCAmelCase_ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}} lowerCAmelCase_ = [ { """type""": """header""", """text""": { """type""": """plain_text""", """text""": F'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''', """emoji""": True, }, } ] lowerCAmelCase_ = 0 for log in Path().glob('*.log'): lowerCAmelCase_ = 0 with open(log, 'r') as f: for line in f: lowerCAmelCase_ = json.loads(line) if line.get('nodeid', '') != "": lowerCAmelCase_ = line["""nodeid"""] if line.get('duration', None) is not None: lowerCAmelCase_ = F'''{line["duration"]:.4f}''' if line.get('outcome', '') == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split('_')[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) lowerCAmelCase_ = [] log.unlink() lowerCAmelCase_ = """""" lowerCAmelCase_ = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += F"*{name[1:]}: {num_failed} failed test*\n" else: message += F"*{name[1:]}: {num_failed} failed tests*\n" lowerCAmelCase_ = [] lowerCAmelCase_ = {} for test in failed_tests: lowerCAmelCase_ = test[0].split('::') lowerCAmelCase_ = data[0].split('/')[-1] if data[0] not in filesafailed: lowerCAmelCase_ = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) lowerCAmelCase_ = [test[0] for test in failed_table] lowerCAmelCase_ = list(set(files)) # Count number of instances in failed_tests lowerCAmelCase_ = [] for file in individual_files: table.append([file, len(filesafailed[file])]) lowerCAmelCase_ = tabulate( table, headers=['Test Location', 'Num Failed'], tablefmt=hf_table_format, stralign='right', ) message += F"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3_000: lowerCAmelCase_ = """Too many failed tests, please see the full report in the Action results.""" lowerCAmelCase_ = len(err) + 10 lowerCAmelCase_ = message[: 3_000 - offset] + F'''\n...\n```\n{err}''' print(F'''### {message}''') else: lowerCAmelCase_ = """No failed tests! 🤗""" print(F'''## {message}''') payload.append(no_error_payload) if os.environ.get('TEST_TYPE', '') != "": from slack_sdk import WebClient lowerCAmelCase_ = WebClient(token=os.environ['SLACK_API_TOKEN']) if message != "No failed tests! 
🤗": lowerCAmelCase_ = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": message, }, } payload.append(md_report) lowerCAmelCase_ = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": """*For more details:*""", }, """accessory""": { """type""": """button""", """text""": { """type""": """plain_text""", """text""": """Check Action results""", """emoji""": True, }, """url""": F'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''', }, } payload.append(action_button) lowerCAmelCase_ = { """type""": """context""", """elements""": [ { """type""": """plain_text""", """text""": F'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''', } ], } payload.append(date_report) lowerCAmelCase_ = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload) lowerCAmelCase_ = response.data["""ts"""] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name lowerCAmelCase_ = """""" for i, row in enumerate(test_failures): if row[0] != test_class: lowerCAmelCase_ = row[0] else: lowerCAmelCase_ = """""" lowerCAmelCase_ = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": F'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''', }, } client.chat_postMessage( channel='#accelerate-ci-daily', thread_ts=ts, blocks=[payload], )
364
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging lowerCAmelCase_ = logging.get_logger(__name__) class __A ( A_ ): '''simple docstring''' lowerCAmelCase : str = ["pixel_values"] def __init__( self : Tuple ,_snake_case : bool = True ,_snake_case : Optional[Dict[str, int]] = None ,_snake_case : PILImageResampling = PILImageResampling.BICUBIC ,_snake_case : bool = True ,_snake_case : bool = True ,_snake_case : Union[int, float] = 1 / 255 ,_snake_case : Dict[str, int] = None ,_snake_case : bool = True ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[Union[float, List[float]]] = None ,**_snake_case : Optional[Any] ,) -> None: """simple docstring""" super().__init__(**_snake_case ) lowercase__ : str = size if size is not None else {'''height''': 224, '''width''': 224} lowercase__ : Optional[int] = get_size_dict(_snake_case ) lowercase__ : List[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} lowercase__ : Optional[int] = get_size_dict(_snake_case ,default_to_square=_snake_case ,param_name='''crop_size''' ) lowercase__ : Tuple = do_resize lowercase__ : List[Any] = do_rescale lowercase__ : Any = do_normalize lowercase__ : List[str] = do_center_crop lowercase__ : Optional[Any] = crop_size lowercase__ : Union[str, Any] = size lowercase__ : Any = resample lowercase__ : int = rescale_factor lowercase__ : Tuple = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN lowercase__ : str = image_std if image_std is not None else IMAGENET_DEFAULT_STD def UpperCAmelCase ( self : str ,_snake_case : np.ndarray ,_snake_case : Dict[str, int] ,_snake_case : PILImageResampling = PILImageResampling.BILINEAR ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Dict ,) -> np.ndarray: """simple docstring""" lowercase__ : List[str] = get_size_dict(_snake_case ) if "shortest_edge" in size: lowercase__ : str = get_resize_output_image_size(_snake_case ,size=size['''shortest_edge'''] ,default_to_square=_snake_case ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: lowercase__ : int = (size['''height'''], size['''width''']) else: raise ValueError(f"""Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}""" ) return resize(_snake_case ,size=_snake_case ,resample=_snake_case ,data_format=_snake_case ,**_snake_case ) def UpperCAmelCase ( self : List[Any] ,_snake_case : np.ndarray ,_snake_case : Dict[str, int] ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Tuple ,) -> np.ndarray: """simple docstring""" lowercase__ : Optional[Any] = get_size_dict(_snake_case ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(_snake_case ,size=(size['''height'''], size['''width''']) ,data_format=_snake_case ,**_snake_case ) def UpperCAmelCase ( self : Optional[Any] ,_snake_case : np.ndarray ,_snake_case : float ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Optional[int] ) -> np.ndarray: """simple docstring""" return rescale(_snake_case ,scale=_snake_case ,data_format=_snake_case ,**_snake_case ) def UpperCAmelCase ( self : Dict ,_snake_case : np.ndarray ,_snake_case : Union[float, List[float]] ,_snake_case : Union[float, List[float]] ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Dict ,) -> np.ndarray: """simple docstring""" return normalize(_snake_case ,mean=_snake_case ,std=_snake_case ,data_format=_snake_case ,**_snake_case ) def UpperCAmelCase ( self : Optional[Any] ,_snake_case : ImageInput ,_snake_case : Optional[bool] = None ,_snake_case : Dict[str, int] = None ,_snake_case : PILImageResampling = None ,_snake_case : bool = None ,_snake_case : int = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[float] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[Union[str, TensorType]] = None ,_snake_case : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**_snake_case : List[str] ,) -> BatchFeature: """simple docstring""" lowercase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale lowercase__ : int = do_normalize if do_normalize is not None else self.do_normalize lowercase__ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase__ : Optional[Any] = crop_size if crop_size is not None else self.crop_size lowercase__ : Tuple = get_size_dict(_snake_case ,param_name='''crop_size''' ,default_to_square=_snake_case ) lowercase__ : Tuple = resample if resample is not None else self.resample lowercase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean lowercase__ : List[str] = image_std if image_std is not None else self.image_std lowercase__ : Optional[int] = size if size is not None else self.size lowercase__ : int = get_size_dict(_snake_case ) if not is_batched(_snake_case ): lowercase__ : Optional[Any] = [images] if not valid_images(_snake_case ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) # All transformations expect numpy arrays. 
lowercase__ : str = [to_numpy_array(_snake_case ) for image in images] if do_resize: lowercase__ : int = [self.resize(image=_snake_case ,size=_snake_case ,resample=_snake_case ) for image in images] if do_center_crop: lowercase__ : str = [self.center_crop(image=_snake_case ,size=_snake_case ) for image in images] if do_rescale: lowercase__ : Optional[Any] = [self.rescale(image=_snake_case ,scale=_snake_case ) for image in images] if do_normalize: lowercase__ : List[str] = [self.normalize(image=_snake_case ,mean=_snake_case ,std=_snake_case ) for image in images] lowercase__ : Union[str, Any] = [to_channel_dimension_format(_snake_case ,_snake_case ) for image in images] lowercase__ : Any = {'''pixel_values''': images} return BatchFeature(data=_snake_case ,tensor_type=_snake_case )
302
0
import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case : Tuple = logging.get_logger(__name__) snake_case : Any = { '''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''', '''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''', '''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''', } class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 'owlvit_text_model' def __init__( self , _lowerCamelCase=4_9408 , _lowerCamelCase=512 , _lowerCamelCase=2048 , _lowerCamelCase=12 , _lowerCamelCase=8 , _lowerCamelCase=16 , _lowerCamelCase="quick_gelu" , _lowerCamelCase=1e-5 , _lowerCamelCase=0.0 , _lowerCamelCase=0.02 , _lowerCamelCase=1.0 , _lowerCamelCase=0 , _lowerCamelCase=4_9406 , _lowerCamelCase=4_9407 , **_lowerCamelCase , ): super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase ) a :Tuple = vocab_size a :Optional[Any] = hidden_size a :Dict = intermediate_size a :str = num_hidden_layers a :Optional[int] = num_attention_heads a :Union[str, Any] = max_position_embeddings a :Any = hidden_act a :Tuple = layer_norm_eps a :str = attention_dropout a :Union[str, Any] = initializer_range a :Union[str, Any] = initializer_factor @classmethod def SCREAMING_SNAKE_CASE__ ( cls , _lowerCamelCase , **_lowerCamelCase ): cls._set_token_in_kwargs(_lowerCamelCase ) a , a :Optional[Any] = cls.get_config_dict(_lowerCamelCase , **_lowerCamelCase ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get('''model_type''' ) == "owlvit": a :Tuple = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_lowerCamelCase , **_lowerCamelCase ) class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 'owlvit_vision_model' def __init__( self , _lowerCamelCase=768 , _lowerCamelCase=3072 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3 , _lowerCamelCase=768 , _lowerCamelCase=32 , _lowerCamelCase="quick_gelu" , _lowerCamelCase=1e-5 , _lowerCamelCase=0.0 , _lowerCamelCase=0.02 , _lowerCamelCase=1.0 , **_lowerCamelCase , ): super().__init__(**_lowerCamelCase ) a :Tuple = hidden_size a :Any = intermediate_size a :int = num_hidden_layers a :Union[str, Any] = num_attention_heads a :Optional[Any] = num_channels a :Tuple = image_size a :Any = patch_size a :Any = hidden_act a :Dict = layer_norm_eps a :int = attention_dropout a :Tuple = initializer_range a :Any = initializer_factor @classmethod def SCREAMING_SNAKE_CASE__ ( cls , _lowerCamelCase , **_lowerCamelCase ): cls._set_token_in_kwargs(_lowerCamelCase ) a , a :List[str] = cls.get_config_dict(_lowerCamelCase , **_lowerCamelCase ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get('''model_type''' ) == "owlvit": a :Tuple = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_lowerCamelCase , **_lowerCamelCase ) class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 'owlvit' SCREAMING_SNAKE_CASE__ = True def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=512 , _lowerCamelCase=2.6592 , _lowerCamelCase=True , **_lowerCamelCase , ): super().__init__(**_lowerCamelCase ) if text_config is None: a :Dict = {} logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' ) if vision_config is None: a :int = {} logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' ) a :Union[str, Any] = OwlViTTextConfig(**_lowerCamelCase ) a :List[Any] = OwlViTVisionConfig(**_lowerCamelCase ) a :List[Any] = projection_dim a :Union[str, Any] = logit_scale_init_value a :List[str] = return_dict a :Dict = 1.0 @classmethod def SCREAMING_SNAKE_CASE__ ( cls , _lowerCamelCase , **_lowerCamelCase ): cls._set_token_in_kwargs(_lowerCamelCase ) a , a :int = cls.get_config_dict(_lowerCamelCase , **_lowerCamelCase ) if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_lowerCamelCase , **_lowerCamelCase ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ): a :Any = {} a :Union[str, Any] = text_config a :Dict = vision_config return cls.from_dict(_lowerCamelCase , **_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = copy.deepcopy(self.__dict__ ) a :Tuple = self.text_config.to_dict() a :str = self.vision_config.to_dict() a :Dict = self.__class__.model_type return output class _snake_case ( _snake_case ): @property def SCREAMING_SNAKE_CASE__ ( self ): return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self ): return OrderedDict( [ ('''logits_per_image''', {0: '''batch'''}), ('''logits_per_text''', {0: '''batch'''}), ('''text_embeds''', {0: '''batch'''}), ('''image_embeds''', {0: '''batch'''}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self ): return 1e-4 def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = None , ): a :List[str] = super().generate_dummy_inputs( processor.tokenizer , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , framework=_lowerCamelCase ) a :Tuple = super().generate_dummy_inputs( processor.image_processor , batch_size=_lowerCamelCase , framework=_lowerCamelCase ) return {**text_input_dict, **image_input_dict} @property def SCREAMING_SNAKE_CASE__ ( self ): return 14
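# Composition sketch (added): the three classes above correspond to
# OwlViTTextConfig, OwlViTVisionConfig and OwlViTConfig in upstream
# transformers; per the classmethod above, from_text_vision_configs takes
# plain dicts, hence the to_dict() calls.
#
#     from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig
#
#     text_config = OwlViTTextConfig(hidden_size=512, num_hidden_layers=12)
#     vision_config = OwlViTVisionConfig(hidden_size=768, patch_size=32)
#     config = OwlViTConfig.from_text_vision_configs(text_config.to_dict(), vision_config.to_dict())
#     print(config.projection_dim)  # 512 by default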
94
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
94
1
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def lowerCamelCase__ (): SCREAMING_SNAKE_CASE = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png' SCREAMING_SNAKE_CASE = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase).raw).convert('RGB') return image def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = [] # fmt: off # vision encoder rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding')) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding')) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight')) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias')) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight')) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias')) for i in range(config.vision_config.num_hidden_layers): rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''')) rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''')) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''')) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''')) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''')) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',)) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''')) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''')) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''')) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''')) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''')) # QFormer rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight')) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias')) # fmt: on return rename_keys def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = dct.pop(_UpperCAmelCase) SCREAMING_SNAKE_CASE = val def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): for i in range(config.vision_config.num_hidden_layers): # read in original q and v biases SCREAMING_SNAKE_CASE = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''') SCREAMING_SNAKE_CASE = 
state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''') # next, set bias in the state dict SCREAMING_SNAKE_CASE = torch.cat((q_bias, torch.zeros_like(_UpperCAmelCase , requires_grad=_UpperCAmelCase), v_bias)) SCREAMING_SNAKE_CASE = qkv_bias def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = 364 if 'coco' in model_name else 224 SCREAMING_SNAKE_CASE = BlipaVisionConfig(image_size=_UpperCAmelCase).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=_UpperCAmelCase).to_dict() elif "opt-6.7b" in model_name: SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=_UpperCAmelCase).to_dict() elif "t5-xl" in model_name: SCREAMING_SNAKE_CASE = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1).to_dict() elif "t5-xxl" in model_name: SCREAMING_SNAKE_CASE = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1).to_dict() SCREAMING_SNAKE_CASE = BlipaConfig(vision_config=_UpperCAmelCase , text_config=_UpperCAmelCase) return config, image_size @torch.no_grad() def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=False): SCREAMING_SNAKE_CASE = ( AutoTokenizer.from_pretrained('facebook/opt-2.7b') if 'opt' in model_name else AutoTokenizer.from_pretrained('google/flan-t5-xl') ) SCREAMING_SNAKE_CASE = tokenizer('\n' , add_special_tokens=_UpperCAmelCase).input_ids[0] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_blipa_config(_UpperCAmelCase , eos_token_id=_UpperCAmelCase) SCREAMING_SNAKE_CASE = BlipaForConditionalGeneration(_UpperCAmelCase).eval() SCREAMING_SNAKE_CASE = { 'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'), 'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'), 'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'), 'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'), 'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'), 'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'), 'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'), } SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model_name_to_original[model_name] # load original model print('Loading original model...') SCREAMING_SNAKE_CASE = 'cuda' if torch.cuda.is_available() else 'cpu' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = load_model_and_preprocess( name=_UpperCAmelCase , model_type=_UpperCAmelCase , is_eval=_UpperCAmelCase , device=_UpperCAmelCase) original_model.eval() print('Done!') # update state dict keys SCREAMING_SNAKE_CASE = original_model.state_dict() SCREAMING_SNAKE_CASE = create_rename_keys(_UpperCAmelCase) for src, dest in rename_keys: rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): SCREAMING_SNAKE_CASE = state_dict.pop(_UpperCAmelCase) if key.startswith('Qformer.bert'): SCREAMING_SNAKE_CASE = key.replace('Qformer.bert' , 'qformer') if "attention.self" in key: SCREAMING_SNAKE_CASE = key.replace('self' , 'attention') if "opt_proj" in key: SCREAMING_SNAKE_CASE = key.replace('opt_proj' , 'language_projection') if "t5_proj" in key: SCREAMING_SNAKE_CASE = key.replace('t5_proj' , 'language_projection') if key.startswith('opt'): SCREAMING_SNAKE_CASE = key.replace('opt' , 'language') if 
key.startswith('t5'): SCREAMING_SNAKE_CASE = key.replace('t5' , 'language') SCREAMING_SNAKE_CASE = val # read in qv biases read_in_q_v_bias(_UpperCAmelCase , _UpperCAmelCase) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = hf_model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase) assert len(_UpperCAmelCase) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] SCREAMING_SNAKE_CASE = load_demo_image() SCREAMING_SNAKE_CASE = vis_processors['eval'](_UpperCAmelCase).unsqueeze(0).to(_UpperCAmelCase) SCREAMING_SNAKE_CASE = tokenizer(['\n'] , return_tensors='pt').input_ids.to(_UpperCAmelCase) # create processor SCREAMING_SNAKE_CASE = BlipImageProcessor( size={'height': image_size, 'width': image_size} , image_mean=_UpperCAmelCase , image_std=_UpperCAmelCase) SCREAMING_SNAKE_CASE = BlipaProcessor(image_processor=_UpperCAmelCase , tokenizer=_UpperCAmelCase) SCREAMING_SNAKE_CASE = processor(images=_UpperCAmelCase , return_tensors='pt').pixel_values.to(_UpperCAmelCase) # make sure processor creates exact same pixel values assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase) original_model.to(_UpperCAmelCase) hf_model.to(_UpperCAmelCase) with torch.no_grad(): if "opt" in model_name: SCREAMING_SNAKE_CASE = original_model({'image': original_pixel_values, 'text_input': ['']}).logits SCREAMING_SNAKE_CASE = hf_model(_UpperCAmelCase , _UpperCAmelCase).logits else: SCREAMING_SNAKE_CASE = original_model( {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']}).logits SCREAMING_SNAKE_CASE = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100) SCREAMING_SNAKE_CASE = hf_model(_UpperCAmelCase , _UpperCAmelCase , labels=_UpperCAmelCase).logits assert original_logits.shape == logits.shape print('First values of original logits:' , original_logits[0, :3, :3]) print('First values of HF logits:' , logits[0, :3, :3]) # assert values if model_name == "blip2-flan-t5-xl": SCREAMING_SNAKE_CASE = torch.tensor( [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=_UpperCAmelCase) assert torch.allclose(logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4) elif model_name == "blip2-flan-t5-xl-coco": SCREAMING_SNAKE_CASE = torch.tensor( [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=_UpperCAmelCase) else: # cast to same type SCREAMING_SNAKE_CASE = logits.dtype assert torch.allclose(original_logits.to(_UpperCAmelCase) , _UpperCAmelCase , atol=1e-2) print('Looks ok!') print('Generating a caption...') SCREAMING_SNAKE_CASE = '' SCREAMING_SNAKE_CASE = tokenizer(_UpperCAmelCase , return_tensors='pt').input_ids.to(_UpperCAmelCase) SCREAMING_SNAKE_CASE = original_model.generate({'image': original_pixel_values}) SCREAMING_SNAKE_CASE = hf_model.generate( _UpperCAmelCase , _UpperCAmelCase , do_sample=_UpperCAmelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print('Original generation:' , _UpperCAmelCase) SCREAMING_SNAKE_CASE = input_ids.shape[1] SCREAMING_SNAKE_CASE = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_UpperCAmelCase) SCREAMING_SNAKE_CASE = [text.strip() for text in output_text] print('HF generation:' , _UpperCAmelCase) if pytorch_dump_folder_path is not None: processor.save_pretrained(_UpperCAmelCase) hf_model.save_pretrained(_UpperCAmelCase) if push_to_hub: processor.push_to_hub(F'''nielsr/{model_name}''') hf_model.push_to_hub(F'''nielsr/{model_name}''') if __name__ == "__main__": a_ : Optional[Any] = 
argparse.ArgumentParser() a_ : str = [ 'blip2-opt-2.7b', 'blip2-opt-6.7b', 'blip2-opt-2.7b-coco', 'blip2-opt-6.7b-coco', 'blip2-flan-t5-xl', 'blip2-flan-t5-xl-coco', 'blip2-flan-t5-xxl', ] parser.add_argument( '--model_name', default='blip2-opt-2.7b', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) a_ : Optional[int] = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
327
import baseaa def lowerCamelCase__ (_UpperCAmelCase): return baseaa.aaaencode(string.encode('utf-8')) def lowerCamelCase__ (_UpperCAmelCase): return baseaa.aaadecode(_UpperCAmelCase).decode('utf-8') if __name__ == "__main__": import doctest doctest.testmod()
327
1
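A note on the q/v bias handling in the BLIP-2 conversion row above: the original ViT attention layers learn separate biases for the query and value projections but none for the key, so the fused qkv bias is built by splicing a zero vector between them. A minimal sketch of that pattern, using a hypothetical hidden size rather than the checkpoint's real one:

import torch

hidden_size = 8  # illustrative only; the real vision encoder is much wider
q_bias = torch.randn(hidden_size)
v_bias = torch.randn(hidden_size)

# k has no learned bias, so an all-zero vector of the same shape goes in the middle
qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
assert qkv_bias.shape == (3 * hidden_size,)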
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __a =LDMTextToImagePipeline __a =TEXT_TO_IMAGE_PARAMS - { 'negative_prompt', 'negative_prompt_embeds', 'cross_attention_kwargs', 'prompt_embeds', } __a =PipelineTesterMixin.required_optional_params - { 'num_images_per_prompt', 'callback', 'callback_steps', } __a =TEXT_TO_IMAGE_BATCH_PARAMS __a =False def UpperCamelCase__ ( self : List[str] ): torch.manual_seed(0 ) _a = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) _a = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__a , set_alpha_to_one=__a , ) torch.manual_seed(0 ) _a = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , ) torch.manual_seed(0 ) _a = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) _a = CLIPTextModel(__a ) _a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _a = { "unet": unet, "scheduler": scheduler, "vqvae": vae, "bert": text_encoder, "tokenizer": tokenizer, } return components def UpperCamelCase__ ( self : Tuple , __a : Any , __a : Tuple=0 ): if str(__a ).startswith("mps" ): _a = torch.manual_seed(__a ) else: _a = torch.Generator(device=__a ).manual_seed(__a ) _a = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def UpperCamelCase__ ( self : int ): _a = "cpu" # ensure determinism for the device-dependent torch.Generator _a = self.get_dummy_components() _a = LDMTextToImagePipeline(**__a ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) _a = self.get_dummy_inputs(__a ) _a = pipe(**__a ).images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) _a = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self : Union[str, Any] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self : Union[str, Any] , __a : str , __a : Union[str, Any]=torch.floataa , __a : str=0 ): _a = torch.manual_seed(__a ) _a = np.random.RandomState(__a ).standard_normal((1, 4, 32, 32) ) _a = torch.from_numpy(__a ).to(device=__a , dtype=__a ) _a = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, 
"num_inference_steps": 3, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def UpperCamelCase__ ( self : Union[str, Any] ): _a = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(__a ) pipe.set_progress_bar_config(disable=__a ) _a = self.get_inputs(__a ) _a = pipe(**__a ).images _a = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 2_56, 2_56, 3) _a = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878] ) _a = np.abs(expected_slice - image_slice ).max() assert max_diff < 1e-3 @nightly @require_torch_gpu class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self : List[str] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self : Any , __a : Optional[int] , __a : int=torch.floataa , __a : List[str]=0 ): _a = torch.manual_seed(__a ) _a = np.random.RandomState(__a ).standard_normal((1, 4, 32, 32) ) _a = torch.from_numpy(__a ).to(device=__a , dtype=__a ) _a = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def UpperCamelCase__ ( self : Optional[Any] ): _a = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(__a ) pipe.set_progress_bar_config(disable=__a ) _a = self.get_inputs(__a ) _a = pipe(**__a ).images[0] _a = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" ) _a = np.abs(expected_image - image ).max() assert max_diff < 1e-3
63
from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
284
0
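The slow pipeline tests in the row above avoid device-dependent randomness by drawing the initial latents with NumPy on the CPU and only then moving them to the accelerator; CPU and CUDA torch generators produce different streams, so this keeps the expected output slices stable across devices. A minimal sketch of the idea, with the shape matching the 32x32 latent grid used above:

import numpy as np
import torch

seed = 0
# sample with a seeded NumPy generator, then convert, so the values are device-independent
latents = torch.from_numpy(
    np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
).to(dtype=torch.float32)
print(latents.shape)  # torch.Size([1, 4, 32, 32])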
"""simple docstring""" import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Any: '''simple docstring''' lowercase_ = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int: '''simple docstring''' lowercase_ , lowercase_ = emb.weight.shape lowercase_ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) lowercase_ = emb.weight.data return lin_layer def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Any: '''simple docstring''' lowercase_ = torch.load(__lowerCAmelCase , map_location="""cpu""" ) lowercase_ = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""] lowercase_ = mam_aaa["""model"""] remove_ignore_keys_(__lowerCAmelCase ) lowercase_ = state_dict["""encoder.embed_tokens.weight"""].shape[0] lowercase_ = MaMaaaConfig( vocab_size=__lowerCAmelCase , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , ) lowercase_ = state_dict["""decoder.embed_tokens.weight"""] lowercase_ = MaMaaaForConditionalGeneration(__lowerCAmelCase ) model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase ) lowercase_ = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.") parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") UpperCAmelCase : str = parser.parse_args() UpperCAmelCase : List[str] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß) model.save_pretrained(args.pytorch_dump_folder_path)
313
"""simple docstring""" def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> list: '''simple docstring''' if any(not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or x < 0 for x in sequence ): raise TypeError("""Sequence must be list of non-negative integers""" ) for _ in range(len(__lowerCAmelCase ) ): for i, (rod_upper, rod_lower) in enumerate(zip(__lowerCAmelCase , sequence[1:] ) ): if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower return sequence if __name__ == "__main__": assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
313
1
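The make_linear_from_emb helper in the conversion row above implements standard weight tying: the LM head is a bias-free Linear whose weight matrix is the shared embedding table, so output logits are the hidden states projected against the embeddings. A minimal sketch with hypothetical sizes:

import torch
from torch import nn

emb = nn.Embedding(num_embeddings=100, embedding_dim=16)  # illustrative vocab and width
lm_head = nn.Linear(emb.weight.shape[1], emb.weight.shape[0], bias=False)
lm_head.weight.data = emb.weight.data  # tie: logits = hidden @ emb.weight.T

hidden = torch.randn(2, 16)
print(lm_head(hidden).shape)  # torch.Size([2, 100])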
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A ={ "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"], "tokenization_roformer": ["RoFormerTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A =["RoFormerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A =[ "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "RoFormerForCausalLM", "RoFormerForMaskedLM", "RoFormerForMultipleChoice", "RoFormerForQuestionAnswering", "RoFormerForSequenceClassification", "RoFormerForTokenClassification", "RoFormerLayer", "RoFormerModel", "RoFormerPreTrainedModel", "load_tf_weights_in_roformer", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A =[ "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRoFormerForCausalLM", "TFRoFormerForMaskedLM", "TFRoFormerForMultipleChoice", "TFRoFormerForQuestionAnswering", "TFRoFormerForSequenceClassification", "TFRoFormerForTokenClassification", "TFRoFormerLayer", "TFRoFormerModel", "TFRoFormerPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A =[ "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxRoFormerForMaskedLM", "FlaxRoFormerForMultipleChoice", "FlaxRoFormerForQuestionAnswering", "FlaxRoFormerForSequenceClassification", "FlaxRoFormerForTokenClassification", "FlaxRoFormerModel", "FlaxRoFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys __A =_LazyModule(__name__, 
globals()['__file__'], _import_structure, module_spec=__spec__)
163
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : Tuple , lowerCamelCase : List[str] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] ) -> Any: __snake_case : List[Any] = dataset __snake_case : Optional[int] = process __snake_case : str = params def __len__( self : Optional[Any] ) -> Any: return len(self.dataset ) def __getitem__( self : Dict , lowerCamelCase : List[Any] ) -> List[str]: __snake_case : List[Any] = self.dataset[i] __snake_case : Tuple = self.process(lowerCamelCase , **self.params ) return processed class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : Dict=None ) -> int: __snake_case : List[Any] = loader __snake_case : Dict = infer __snake_case : Tuple = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether __snake_case : Union[str, Any] = None __snake_case : Optional[Any] = loader_batch_size # Internal bookkeeping __snake_case : int = None __snake_case : Optional[int] = None def __len__( self : Optional[Any] ) -> Tuple: return len(self.loader ) def __iter__( self : str ) -> Tuple: __snake_case : int = iter(self.loader ) return self def __snake_case ( self : int ) -> Any: if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is a simple tensor, just fetch the slice __snake_case : Union[str, Any] = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) __snake_case : int = {} for k, element in self._loader_batch_data.items(): if isinstance(lowerCamelCase , lowerCamelCase ): # Convert ModelOutput to tuple first __snake_case : Dict = element.to_tuple() if isinstance(element[0] , torch.Tensor ): __snake_case : Any = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): __snake_case : Optional[Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowerCamelCase , lowerCamelCase ): # Those are stored as lists of tensors so they need specific unbatching. if isinstance(element[0] , torch.Tensor ): __snake_case : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): __snake_case : str = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around __snake_case : Union[str, Any] = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it look like batch_size=1 # For compatibility with other methods within transformers __snake_case : List[Any] = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it look like batch_size=1 # For compatibility with other methods within transformers __snake_case : Optional[Any] = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`.
__snake_case : Tuple = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 __snake_case : str = self._loader_batch_data.__class__(lowerCamelCase ) self._loader_batch_index += 1 return result def __snake_case ( self : Dict ) -> Union[str, Any]: if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch __snake_case : List[str] = next(self.iterator ) __snake_case : int = self.infer(lowerCamelCase , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(lowerCamelCase , torch.Tensor ): __snake_case : List[Any] = processed else: __snake_case : Optional[Any] = list(processed.keys() )[0] __snake_case : List[Any] = processed[key] if isinstance(lowerCamelCase , lowerCamelCase ): __snake_case : List[str] = len(lowerCamelCase ) else: __snake_case : Tuple = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. __snake_case : Optional[Any] = observed_batch_size # Setting internal index to unwrap the batch __snake_case : Union[str, Any] = processed __snake_case : str = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : int , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[int]=None ) -> Any: super().__init__(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def __iter__( self : Optional[int] ) -> Optional[int]: __snake_case : Union[str, Any] = iter(self.loader ) __snake_case : int = None return self def __snake_case ( self : List[Any] ) -> List[Any]: if self.subiterator is None: __snake_case : Optional[int] = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item __snake_case : int = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start looking at the next item # ChunkIterator will keep feeding until ALL elements of the iterator # have created their subiterator and have been iterated against. # # Another way to look at it is that we're basically flattening lists of lists # into a single list, but with generators __snake_case : Union[str, Any] = self.infer(next(self.iterator ) , **self.params ) __snake_case : int = next(self.subiterator ) return processed class a (_lowerCAmelCase ): """simple docstring""" def __iter__( self : Any ) -> Optional[Any]: __snake_case : str = iter(self.loader ) return self def __snake_case ( self : Tuple ) -> str: # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator`, so we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # hits a `is_last` and then just passes it on to the caller.
__snake_case : Dict = False __snake_case : Dict = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: __snake_case : Union[str, Any] = self.loader_batch_item() __snake_case : Any = item.pop("is_last" ) accumulator.append(lowerCamelCase ) if is_last: return accumulator while not is_last: __snake_case : str = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(lowerCamelCase , torch.Tensor ): __snake_case : Optional[int] = processed else: __snake_case : Union[str, Any] = list(processed.keys() )[0] __snake_case : Optional[Any] = processed[key] if isinstance(lowerCamelCase , lowerCamelCase ): __snake_case : int = len(lowerCamelCase ) else: __snake_case : int = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. __snake_case : Dict = observed_batch_size __snake_case : Union[str, Any] = processed __snake_case : List[str] = 0 while self._loader_batch_index < self.loader_batch_size: __snake_case : str = self.loader_batch_item() __snake_case : str = item.pop("is_last" ) accumulator.append(lowerCamelCase ) if is_last: return accumulator else: __snake_case : List[str] = processed __snake_case : Tuple = item.pop("is_last" ) accumulator.append(lowerCamelCase ) return accumulator class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCamelCase : Dataset , lowerCamelCase : str ) -> Optional[Any]: __snake_case : int = dataset __snake_case : Union[str, Any] = key def __len__( self : Tuple ) -> Union[str, Any]: return len(self.dataset ) def __getitem__( self : Optional[Any] , lowerCamelCase : str ) -> Optional[int]: return self.dataset[i][self.key] class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : List[Any] , lowerCamelCase : Dataset , lowerCamelCase : str , lowerCamelCase : str ) -> List[str]: __snake_case : Any = dataset __snake_case : Any = keya __snake_case : Union[str, Any] = keya def __len__( self : Optional[int] ) -> Tuple: return len(self.dataset ) def __getitem__( self : Tuple , lowerCamelCase : List[str] ) -> Optional[Any]: return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
123
0
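The loader_batch_item machinery in the pipeline iterators above boils down to one trick: a model output of batch size N is re-exposed as N successive items that each look like batch_size=1 (via unsqueeze(0)), so downstream postprocessing never has to know the real batch size. A minimal sketch of that unrolling, assuming a plain dict of tensors rather than a full ModelOutput:

import torch

batch = {"logits": torch.randn(4, 10)}  # hypothetical model output, batch size 4

def unrolled(batch):
    n = next(iter(batch.values())).shape[0]
    for i in range(n):
        # slice out item i and restore a leading batch dimension of 1
        yield {k: v[i].unsqueeze(0) for k, v in batch.items()}

for item in unrolled(batch):
    assert item["logits"].shape == (1, 10)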
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} _UpperCAmelCase = { 'vocab_file': { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json', 'roberta-large-openai-detector': ( 'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json' ), }, 'merges_file': { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt', 'roberta-large-openai-detector': ( 'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt' ), }, 'tokenizer_file': { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json', 'roberta-base-openai-detector': ( 'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json' ), 'roberta-large-openai-detector': ( 'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json' ), }, } _UpperCAmelCase = { 'roberta-base': 5_1_2, 'roberta-large': 5_1_2, 'roberta-large-mnli': 5_1_2, 'distilroberta-base': 5_1_2, 'roberta-base-openai-detector': 5_1_2, 'roberta-large-openai-detector': 5_1_2, } class _UpperCamelCase ( lowerCAmelCase_ ): _UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES _UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : Union[str, Any] = ['''input_ids''', '''attention_mask'''] _UpperCamelCase : str = RobertaTokenizer def __init__( self: Optional[int] , _SCREAMING_SNAKE_CASE: List[str]=None , _SCREAMING_SNAKE_CASE: Optional[Any]=None , _SCREAMING_SNAKE_CASE: Optional[int]=None , _SCREAMING_SNAKE_CASE: Optional[int]="replace" , _SCREAMING_SNAKE_CASE: Union[str, Any]="<s>" , _SCREAMING_SNAKE_CASE: int="</s>" , _SCREAMING_SNAKE_CASE: List[str]="</s>" , _SCREAMING_SNAKE_CASE: str="<s>" , _SCREAMING_SNAKE_CASE: Dict="<unk>" , _SCREAMING_SNAKE_CASE: Tuple="<pad>" , _SCREAMING_SNAKE_CASE: str="<mask>" , _SCREAMING_SNAKE_CASE: List[str]=False , _SCREAMING_SNAKE_CASE: str=True , **_SCREAMING_SNAKE_CASE: Tuple , ) -> Any: """simple docstring""" super().__init__( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 
tokenizer_file=_SCREAMING_SNAKE_CASE , errors=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , trim_offsets=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) UpperCamelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , _SCREAMING_SNAKE_CASE ) != add_prefix_space: UpperCamelCase_ = getattr(_SCREAMING_SNAKE_CASE , pre_tok_state.pop("type" ) ) UpperCamelCase_ = add_prefix_space UpperCamelCase_ = pre_tok_class(**_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = add_prefix_space UpperCamelCase_ = "post_processor" UpperCamelCase_ = getattr(self.backend_tokenizer , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if tokenizer_component_instance: UpperCamelCase_ = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: UpperCamelCase_ = tuple(state["sep"] ) if "cls" in state: UpperCamelCase_ = tuple(state["cls"] ) UpperCamelCase_ = False if state.get("add_prefix_space" , _SCREAMING_SNAKE_CASE ) != add_prefix_space: UpperCamelCase_ = add_prefix_space UpperCamelCase_ = True if state.get("trim_offsets" , _SCREAMING_SNAKE_CASE ) != trim_offsets: UpperCamelCase_ = trim_offsets UpperCamelCase_ = True if changes_to_apply: UpperCamelCase_ = getattr(_SCREAMING_SNAKE_CASE , state.pop("type" ) ) UpperCamelCase_ = component_class(**_SCREAMING_SNAKE_CASE ) setattr(self.backend_tokenizer , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @property def lowercase ( self: int ) -> str: """simple docstring""" if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: Dict ) -> List[str]: """simple docstring""" UpperCamelCase_ = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else value UpperCamelCase_ = value def lowercase ( self: Union[str, Any] , *_SCREAMING_SNAKE_CASE: List[str] , **_SCREAMING_SNAKE_CASE: List[str] ) -> BatchEncoding: """simple docstring""" UpperCamelCase_ = kwargs.get("is_split_into_words" , _SCREAMING_SNAKE_CASE ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def lowercase ( self: Dict , *_SCREAMING_SNAKE_CASE: str , **_SCREAMING_SNAKE_CASE: Union[str, Any] ) -> BatchEncoding: """simple docstring""" UpperCamelCase_ = kwargs.get("is_split_into_words" , _SCREAMING_SNAKE_CASE ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._encode_plus(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[str] = None ) -> Tuple[str]: """simple docstring""" UpperCamelCase_ = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE ) return tuple(_SCREAMING_SNAKE_CASE ) def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int]=None ) -> Optional[Any]: """simple docstring""" UpperCamelCase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: List[int] , _SCREAMING_SNAKE_CASE: Optional[List[int]] = None ) -> List[int]: """simple docstring""" UpperCamelCase_ = [self.sep_token_id] UpperCamelCase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
328
import requests from bsa import BeautifulSoup def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> str: UpperCamelCase_ = BeautifulSoup(requests.get(UpperCamelCase_ , params=UpperCamelCase_ ).content , "html.parser" ) UpperCamelCase_ = soup.find("div" , attrs={"class": "gs_ri"} ) UpperCamelCase_ = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" ) return anchors[2].get_text() if __name__ == "__main__": _UpperCAmelCase = { 'title': ( 'Precisely geometry controlled microsupercapacitors for ultrahigh areal ' 'capacitance, volumetric capacitance, and energy density' ), 'journal': 'Chem. Mater.', 'volume': 3_0, 'pages': '3979-3990', 'year': 2_0_1_8, 'hl': 'en', } print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
328
1
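The build_inputs_with_special_tokens logic in the fast RoBERTa tokenizer above follows the usual RoBERTa layout: <s> A </s> for a single sequence and <s> A </s></s> B </s> for a pair, with a doubled separator between the two segments. A minimal sketch with illustrative token ids (0 for <s>, 2 for </s>):

bos, eos = 0, 2
seq_a, seq_b = [10, 11], [20]  # hypothetical token ids

single = [bos] + seq_a + [eos]         # <s> A </s>
pair = single + [eos] + seq_b + [eos]  # <s> A </s></s> B </s>
print(single)  # [0, 10, 11, 2]
print(pair)    # [0, 10, 11, 2, 2, 20, 2]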
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__=False ) -> Optional[int]: try: __lowerCamelCase : int = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __lowerCamelCase : List[Any] = default else: # KEY is set, convert it to True or False. try: __lowerCamelCase : Any = strtobool(lowerCamelCase__ ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"If set, {key} must be yes or no." ) return _value a =parse_flag_from_env("""RUN_SLOW""", default=False) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict: return unittest.skip('Test was skipped' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[Any]: return unittest.skipUnless(_run_slow_tests , 'test is slow' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any: return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]: return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[Any]: return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[str]: return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]: return unittest.skipUnless( is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[str]: return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[Any]: return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Union[str, Any]: return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str: return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any: return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict: return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]: return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str: return 
unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict: return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__=None , lowerCamelCase__=None ) -> Any: if test_case is None: return partial(lowerCamelCase__ , version=lowerCamelCase__ ) return unittest.skipUnless(is_torch_version('>=' , lowerCamelCase__ ) , F"test requires torch version >= {version}" )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[Any]: return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict: return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict: return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(lowerCamelCase__ ) a =( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict: return unittest.skipUnless( _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(lowerCamelCase__ ) class A_ ( unittest.TestCase ): _UpperCAmelCase : Union[str, Any] = True @classmethod def lowerCAmelCase ( cls : int): __lowerCamelCase : List[Any] = tempfile.mkdtemp() @classmethod def lowerCAmelCase ( cls : int): if os.path.exists(cls.tmpdir): shutil.rmtree(cls.tmpdir) def lowerCAmelCase ( self : Any): if self.clear_on_setup: for path in Path(self.tmpdir).glob('**/*'): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(SCREAMING_SNAKE_CASE__) class A_ ( unittest.TestCase ): def lowerCAmelCase ( self : List[Any]): super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class A_ ( unittest.TestCase ): def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Union[mock.Mock, List[mock.Mock]]): __lowerCamelCase : Tuple = mocks if isinstance(SCREAMING_SNAKE_CASE__ ,(tuple, list)) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict: __lowerCamelCase : int = AcceleratorState() __lowerCamelCase : Optional[int] = tensor[None].clone().to(state.device ) __lowerCamelCase : Dict = gather(lowerCamelCase__ ).cpu() __lowerCamelCase : Union[str, Any] = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , lowerCamelCase__ ): return False return True class A_ : def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[int]): __lowerCamelCase : Union[str, Any] = returncode __lowerCamelCase : List[str] = stdout __lowerCamelCase : Tuple = stderr async def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]: while True: __lowerCamelCase : str = await stream.readline() if line: callback(lowerCamelCase__ ) else: break async def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=False , lowerCamelCase__=False ) -> _RunOutput: if echo: print('\nRunning: ' , ' '.join(lowerCamelCase__ ) ) __lowerCamelCase : str = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=lowerCamelCase__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCamelCase__ , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __lowerCamelCase : int = [] __lowerCamelCase : str = [] def tee(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="" ): __lowerCamelCase : Any = line.decode('utf-8' ).rstrip() sink.append(lowerCamelCase__ ) if not quiet: print(lowerCamelCase__ , lowerCamelCase__ , file=lowerCamelCase__ ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda lowerCamelCase__ : tee(lowerCamelCase__ , lowerCamelCase__ , sys.stdout , label='stdout:' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda lowerCamelCase__ : tee(lowerCamelCase__ , lowerCamelCase__ , sys.stderr , label='stderr:' ) ) ), ] , timeout=lowerCamelCase__ , ) return _RunOutput(await p.wait() , lowerCamelCase__ , lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=1_8_0 , lowerCamelCase__=False , lowerCamelCase__=True ) -> _RunOutput: __lowerCamelCase : Union[str, Any] = asyncio.get_event_loop() __lowerCamelCase : str = loop.run_until_complete( _stream_subprocess(lowerCamelCase__ , env=lowerCamelCase__ , stdin=lowerCamelCase__ , timeout=lowerCamelCase__ , quiet=lowerCamelCase__ , echo=lowerCamelCase__ ) ) __lowerCamelCase : Union[str, Any] = ' '.join(lowerCamelCase__ ) if result.returncode > 0: __lowerCamelCase : Optional[int] = '\n'.join(result.stderr ) raise RuntimeError( F"'{cmd_str}' failed with returncode {result.returncode}\n\n" F"The combined stderr from workers follows:\n{stderr}" ) return result class A_ ( SCREAMING_SNAKE_CASE ): pass def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__=False ) -> int: try: __lowerCamelCase : List[str] = subprocess.check_output(lowerCamelCase__ , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(lowerCamelCase__ , 'decode' ): __lowerCamelCase : str = output.decode('utf-8' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F"Command `{' '.join(lowerCamelCase__ )}` failed with the following error:\n\n{e.output.decode()}" ) from e
73
import string import numpy def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): """simple docstring""" return b if a == 0 else greatest_common_divisor(b % a , _SCREAMING_SNAKE_CASE ) class SCREAMING_SNAKE_CASE : __lowerCamelCase : List[str] =string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) __lowerCamelCase : List[Any] =numpy.vectorize(lambda lowerCamelCase__ : x % 36 ) __lowerCamelCase : Optional[Any] =numpy.vectorize(lowerCamelCase__ ) def __init__( self : Union[str, Any] , __lowercase : numpy.ndarray ): '''simple docstring''' __a = self.modulus(__lowercase ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key __a = encrypt_key.shape[0] def UpperCamelCase_ ( self : Dict , __lowercase : str ): '''simple docstring''' return self.key_string.index(__lowercase ) def UpperCamelCase_ ( self : Dict , __lowercase : int ): '''simple docstring''' return self.key_string[round(__lowercase )] def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: __a = det % len(self.key_string ) __a = len(self.key_string ) if greatest_common_divisor(__lowercase , len(self.key_string ) ) != 1: __a = ( F"determinant modular {req_l} of encryption key({det}) " F"is not co prime w.r.t {req_l}.\nTry another key." ) raise ValueError(__lowercase ) def UpperCamelCase_ ( self : Dict , __lowercase : str ): '''simple docstring''' __a = [char for char in text.upper() if char in self.key_string] __a = chars[-1] while len(__lowercase ) % self.break_key != 0: chars.append(__lowercase ) return "".join(__lowercase ) def UpperCamelCase_ ( self : List[str] , __lowercase : str ): '''simple docstring''' __a = self.process_text(text.upper() ) __a = """""" for i in range(0 , len(__lowercase ) - self.break_key + 1 , self.break_key ): __a = text[i : i + self.break_key] __a = [self.replace_letters(__lowercase ) for char in batch] __a = numpy.array([vec] ).T __a = self.modulus(self.encrypt_key.dot(__lowercase ) ).T.tolist()[ 0 ] __a = """""".join( self.replace_digits(__lowercase ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' __a = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: __a = det % len(self.key_string ) __a = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: __a = i break __a = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(__lowercase ) ) def UpperCamelCase_ ( self : Any , __lowercase : str ): '''simple docstring''' __a = self.make_decrypt_key() __a = self.process_text(text.upper() ) __a = """""" for i in range(0 , len(__lowercase ) - self.break_key + 1 , self.break_key ): __a = text[i : i + self.break_key] __a = [self.replace_letters(__lowercase ) for char in batch] __a = numpy.array([vec] ).T __a = self.modulus(decrypt_key.dot(__lowercase ) ).T.tolist()[0] __a = """""".join( self.replace_digits(__lowercase ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def lowerCAmelCase__ ( ): """simple docstring""" __a = int(input("""Enter the order of the encryption key: """ ) ) __a = [] print("""Enter each row of the encryption key with space separated integers""" ) for _ in range(_SCREAMING_SNAKE_CASE ): __a = [int(_SCREAMING_SNAKE_CASE ) 
for x in input().split()] hill_matrix.append(_SCREAMING_SNAKE_CASE ) __a = HillCipher(numpy.array(_SCREAMING_SNAKE_CASE ) ) print("""Would you like to encrypt or decrypt some text? (1 or 2)""" ) __a = input("""\n1. Encrypt\n2. Decrypt\n""" ) if option == "1": __a = input("""What text would you like to encrypt?: """ ) print("""Your encrypted text is:""" ) print(hc.encrypt(_SCREAMING_SNAKE_CASE ) ) elif option == "2": __a = input("""What text would you like to decrypt?: """ ) print("""Your decrypted text is:""" ) print(hc.decrypt(_SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
302
0
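The decrypt-key construction in the Hill cipher above hinges on one number-theoretic step: the determinant of the encryption key must have a multiplicative inverse modulo 36 (hence the coprimality check at construction time), and the code finds that inverse by brute-force search. A minimal sketch of the step with a hypothetical determinant:

m = 36
det = 5  # illustrative; must be coprime with 36 for an inverse to exist
det_inv = next(i for i in range(1, m) if (det * i) % m == 1)
assert (det * det_inv) % m == 1
print(det_inv)  # 29, since 5 * 29 = 145 = 4 * 36 + 1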
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase_ : int = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ : Dict = ["""MBartTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ : str = ["""MBartTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ : List[str] = [ """MBART_PRETRAINED_MODEL_ARCHIVE_LIST""", """MBartForCausalLM""", """MBartForConditionalGeneration""", """MBartForQuestionAnswering""", """MBartForSequenceClassification""", """MBartModel""", """MBartPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ : List[Any] = [ """TFMBartForConditionalGeneration""", """TFMBartModel""", """TFMBartPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ : str = [ """FlaxMBartForConditionalGeneration""", """FlaxMBartForQuestionAnswering""", """FlaxMBartForSequenceClassification""", """FlaxMBartModel""", """FlaxMBartPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys lowerCamelCase_ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
215
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ : int = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) lowerCamelCase_ : Any = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', F'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', F'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', 
F'decoder.layers.{i}.final_layer_norm.bias')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""), ("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) def _A ( lowercase , lowercase , lowercase ): """simple docstring""" a =state_dict.pop(lowercase ) a =val def _A ( lowercase ): """simple docstring""" a =OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: a =key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' ) a =value else: a =value return new_state_dict def _A ( lowercase ): """simple docstring""" a ='''''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) a =state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) a =state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict a =in_proj_weight[:2_56, :] a =in_proj_bias[:2_56] a =in_proj_weight[2_56:5_12, :] a =in_proj_bias[2_56:5_12] a =in_proj_weight[-2_56:, :] a =in_proj_bias[-2_56:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention a =state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) a =state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict a =in_proj_weight[:2_56, :] a =in_proj_bias[:2_56] a =in_proj_weight[2_56:5_12, :] a =in_proj_bias[2_56:5_12] a =in_proj_weight[-2_56:, :] a =in_proj_bias[-2_56:] # read in weights + bias of input projection layer of cross-attention a =state_dict.pop( f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' ) a =state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) of cross-attention to the state dict a =in_proj_weight_cross_attn[:2_56, :] a =in_proj_bias_cross_attn[:2_56] a =in_proj_weight_cross_attn[2_56:5_12, :] a =in_proj_bias_cross_attn[2_56:5_12] a =in_proj_weight_cross_attn[-2_56:, :] a =in_proj_bias_cross_attn[-2_56:] def _A ( lowercase , lowercase ): """simple docstring""" a , a =image.size a =max(lowercase , lowercase ) a =8_00 if '''detection''' in checkpoint_url else 10_00 a 
=target_max_size / current_max_size a =image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def _A ( lowercase ): """simple docstring""" a =F.to_tensor(lowercase ) a =F.normalize(lowercase , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def _A ( lowercase , lowercase , lowercase ): """simple docstring""" logger.info('''Converting model...''' ) # load original state dict a =torch.hub.load_state_dict_from_url(lowercase , map_location='''cpu''' ) # rename keys for src, dest in rename_keys: rename_key(lowercase , lowercase , lowercase ) a =rename_backbone_keys(lowercase ) # query, key and value matrices need special treatment read_in_q_k_v(lowercase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them a ='''model.''' for key in state_dict.copy().keys(): if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ): a =state_dict.pop(lowercase ) a =val # create HuggingFace model and load state dict a =TableTransformerConfig( backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: a =15 a =2 a ={0: '''table''', 1: '''table rotated'''} a =idalabel a ={v: k for k, v in idalabel.items()} else: a =1_25 a =6 a ={ 0: '''table''', 1: '''table column''', 2: '''table row''', 3: '''table column header''', 4: '''table projected row header''', 5: '''table spanning cell''', } a =idalabel a ={v: k for k, v in idalabel.items()} a =DetrImageProcessor( format='''coco_detection''' , max_size=8_00 if '''detection''' in checkpoint_url else 10_00 ) a =TableTransformerForObjectDetection(lowercase ) model.load_state_dict(lowercase ) model.eval() # verify our conversion a ='''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png''' a =hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=lowercase ) a =Image.open(lowercase ).convert('''RGB''' ) a =normalize(resize(lowercase , lowercase ) ).unsqueeze(0 ) a =model(lowercase ) if "detection" in checkpoint_url: a =(1, 15, 3) a =torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) a =torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] ) else: a =(1, 1_25, 7) a =torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) a =torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , lowercase , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowercase , atol=1E-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(lowercase ).mkdir(exist_ok=lowercase ) model.save_pretrained(lowercase ) image_processor.save_pretrained(lowercase ) if push_to_hub: # Push model to HF hub logger.info('''Pushing model to the hub...''' ) a =( '''microsoft/table-transformer-detection''' if '''detection''' in checkpoint_url else '''microsoft/table-transformer-structure-recognition''' ) model.push_to_hub(lowercase ) 
image_processor.push_to_hub(lowercase ) if __name__ == "__main__": lowerCamelCase_ : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", type=str, choices=[ """https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", """https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""", ], help="""URL of the Table Transformer checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowerCamelCase_ : Union[str, Any] = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
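# Aside: a minimal, self-contained sketch of the q/k/v split performed in
# read_in_q_k_v above, assuming hidden size 256 as in the Table Transformer
# checkpoints. PyTorch's nn.MultiheadAttention stores the query/key/value
# projections stacked in one (3*hidden, hidden) matrix; DETR-style HF models
# keep them as three separate matrices. Tensor names here are illustrative.
import torch

hidden_size = 256
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
in_proj_bias = torch.randn(3 * hidden_size)

# split() is equivalent to the explicit slices used in the conversion function
q_w, k_w, v_w = in_proj_weight.split(hidden_size, dim=0)
q_b, k_b, v_b = in_proj_bias.split(hidden_size)

assert torch.equal(q_w, in_proj_weight[:hidden_size, :])
assert torch.equal(k_b, in_proj_bias[hidden_size : 2 * hidden_size])
assert torch.equal(v_b, in_proj_bias[-hidden_size:])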
215
1
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} _SCREAMING_SNAKE_CASE = { """vocab_file""": { """facebook/mbart-large-en-ro""": ( """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model""" ), """facebook/mbart-large-cc25""": ( """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""", """facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""", }, } _SCREAMING_SNAKE_CASE = { """facebook/mbart-large-en-ro""": 10_24, """facebook/mbart-large-cc25""": 10_24, } # fmt: off _SCREAMING_SNAKE_CASE = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""] class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: Dict = VOCAB_FILES_NAMES __magic_name__: Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__: List[str] = PRETRAINED_VOCAB_FILES_MAP __magic_name__: int = ["input_ids", "attention_mask"] __magic_name__: Any = MBartTokenizer __magic_name__: List[int] = [] __magic_name__: List[int] = [] def __init__( self : int , _A : Optional[int]=None , _A : int=None , _A : List[str]="<s>" , _A : str="</s>" , _A : List[str]="</s>" , _A : Optional[int]="<s>" , _A : Any="<unk>" , _A : Any="<pad>" , _A : List[str]="<mask>" , _A : Tuple=None , _A : List[str]=None , _A : Tuple=None , **_A : Optional[Any] , ) -> Dict: """simple docstring""" snake_case_ : Dict = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token super().__init__( vocab_file=_A , tokenizer_file=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , src_lang=_A , tgt_lang=_A , additional_special_tokens=_A , **_A , ) snake_case_ : Optional[int] = vocab_file snake_case_ : Dict = False if not self.vocab_file else True snake_case_ : str = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) snake_case_ : Optional[int] = { lang_code: self.convert_tokens_to_ids(_A ) for lang_code in FAIRSEQ_LANGUAGE_CODES } snake_case_ : Tuple = src_lang if src_lang is not None else 'en_XX' snake_case_ : int = self.convert_tokens_to_ids(self._src_lang ) snake_case_ : int = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def UpperCAmelCase_ ( self : Optional[int] ) -> str: """simple docstring""" return self._src_lang @src_lang.setter def UpperCAmelCase_ ( self : List[str] , _A : str ) -> None: """simple docstring""" snake_case_ : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def UpperCAmelCase_ ( self : List[Any] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCAmelCase_ ( self : Tuple , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: """simple docstring""" snake_case_ : Union[str, Any] = [self.sep_token_id] snake_case_ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCAmelCase_ ( self : int , _A : Tuple , _A : str , _A : Optional[str] , _A : Optional[str] , **_A : str ) -> Tuple: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) snake_case_ : int = src_lang snake_case_ : List[str] = self(_A , add_special_tokens=_A , return_tensors=_A , **_A ) snake_case_ : List[Any] = self.convert_tokens_to_ids(_A ) snake_case_ : int = tgt_lang_id return inputs def UpperCAmelCase_ ( self : str , _A : List[str] , _A : str = "en_XX" , _A : Optional[List[str]] = None , _A : str = "ro_RO" , **_A : str , ) -> BatchEncoding: """simple docstring""" snake_case_ : Optional[Any] = src_lang snake_case_ : List[Any] = tgt_lang return super().prepare_seqaseq_batch(_A , _A , **_A ) def UpperCAmelCase_ ( self : int ) -> Any: """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]: """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCAmelCase_ ( self : Dict , _A : Any ) -> None: """simple docstring""" snake_case_ : List[str] = self.convert_tokens_to_ids(_A ) snake_case_ : List[Any] = [] snake_case_ : Tuple = [self.eos_token_id, self.cur_lang_code] snake_case_ : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens ) snake_case_ : List[str] = self.convert_ids_to_tokens(self.suffix_tokens ) snake_case_ : Optional[int] = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def UpperCAmelCase_ ( self : Union[str, Any] , _A : str ) -> None: """simple docstring""" snake_case_ : int = self.convert_tokens_to_ids(_A ) snake_case_ : Union[str, Any] = [] snake_case_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code] snake_case_ : Any = 
self.convert_ids_to_tokens(self.prefix_tokens ) snake_case_ : Dict = self.convert_ids_to_tokens(self.suffix_tokens ) snake_case_ : str = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def UpperCAmelCase_ ( self : Tuple , _A : str , _A : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(_A ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" ) return snake_case_ : Any = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ): copyfile(self.vocab_file , _A ) return (out_vocab_file,)
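# Aside: illustrative use of the language-code handling above. The checkpoint
# name is real, but fetching it needs network access, so treat this as a sketch.
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro")
tok.src_lang = "en_XX"  # the property setter re-applies the source special tokens
enc = tok("UN Chief Says There Is No Military Solution in Syria")
# Source sequences carry no prefix and are suffixed with </s> plus the language code:
print(tok.convert_ids_to_tokens(enc["input_ids"][-2:]))  # ['</s>', 'en_XX']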
327
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
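# Quick property check for the two implementations above (assumes both functions
# are in scope); each should agree with math.gcd on small non-negative inputs.
import math

for x in range(50):
    for y in range(50):
        assert euclidean_gcd(x, y) == euclidean_gcd_recursive(x, y) == math.gcd(x, y)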
327
1
"""simple docstring""" import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class a ( nn.Module ): """simple docstring""" def __init__( self: List[Any] ): """simple docstring""" super().__init__() A__ = nn.Linear(3 , 4 ) A__ = nn.BatchNormad(4 ) A__ = nn.Linear(4 , 5 ) def UpperCamelCase ( self: Dict , UpperCamelCase: Union[str, Any] ): """simple docstring""" return self.lineara(self.batchnorm(self.lineara(_snake_case ) ) ) class a ( _lowerCamelCase ): """simple docstring""" def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Tuple , *UpperCamelCase: int , **UpperCamelCase: List[Any] ): """simple docstring""" return (args[0] + 1,) + args[1:], kwargs class a ( _lowerCamelCase ): """simple docstring""" def UpperCamelCase ( self: Any , UpperCamelCase: List[str] , UpperCamelCase: Any ): """simple docstring""" return output + 1 class a ( unittest.TestCase ): """simple docstring""" def UpperCamelCase ( self: int ): """simple docstring""" A__ = ModelForTest() A__ = ModelHook() add_hook_to_module(_snake_case , _snake_case ) self.assertEqual(test_model._hf_hook , _snake_case ) self.assertTrue(hasattr(_snake_case , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(_snake_case ) self.assertFalse(hasattr(_snake_case , """_hf_hook""" ) ) self.assertFalse(hasattr(_snake_case , """_old_forward""" ) ) def UpperCamelCase ( self: str ): """simple docstring""" A__ = ModelForTest() A__ = ModelHook() add_hook_to_module(_snake_case , _snake_case ) add_hook_to_module(_snake_case , _snake_case , append=_snake_case ) self.assertEqual(isinstance(test_model._hf_hook , _snake_case ) , _snake_case ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(_snake_case , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(_snake_case ) self.assertFalse(hasattr(_snake_case , """_hf_hook""" ) ) self.assertFalse(hasattr(_snake_case , """_old_forward""" ) ) def UpperCamelCase ( self: Any ): """simple docstring""" A__ = ModelForTest() A__ = torch.randn(2 , 3 ) A__ = test_model(x + 1 ) A__ = test_model(x + 2 ) A__ = PreForwardHook() add_hook_to_module(_snake_case , _snake_case ) A__ = test_model(_snake_case ) self.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain A__ = PreForwardHook() add_hook_to_module(_snake_case , _snake_case ) A__ = test_model(_snake_case ) self.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks A__ = SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(_snake_case , _snake_case ) A__ = test_model(_snake_case ) assert torch.allclose(_snake_case , _snake_case , atol=1e-5 ) def UpperCamelCase ( self: Dict ): """simple docstring""" A__ = ModelForTest() A__ = torch.randn(2 , 3 ) A__ = test_model(_snake_case ) A__ = 
PostForwardHook() add_hook_to_module(_snake_case , _snake_case ) A__ = test_model(_snake_case ) self.assertTrue(torch.allclose(_snake_case , output + 1 , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain A__ = PostForwardHook() add_hook_to_module(_snake_case , _snake_case ) A__ = test_model(_snake_case ) self.assertTrue(torch.allclose(_snake_case , output + 1 , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks A__ = SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(_snake_case , _snake_case ) A__ = test_model(_snake_case ) assert torch.allclose(_snake_case , output + 2 , atol=1e-5 ) def UpperCamelCase ( self: Optional[Any] ): """simple docstring""" A__ = ModelForTest() A__ = torch.randn(2 , 3 ) A__ = test_model(_snake_case ) A__ = PostForwardHook() add_hook_to_module(_snake_case , _snake_case ) A__ = test_model(_snake_case ) self.assertTrue(torch.allclose(_snake_case , output + 1 ) ) self.assertTrue(outputa.requires_grad ) A__ = True A__ = test_model(_snake_case ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def UpperCamelCase ( self: List[str] ): """simple docstring""" A__ = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device A__ = torch.randn(2 , 3 ) A__ = model(_snake_case ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(_snake_case , AlignDevicesHook(io_same_device=_snake_case ) ) A__ = torch.randn(2 , 3 ).to(0 ) A__ = model(_snake_case ) self.assertEqual(output.device , torch.device(0 ) ) def UpperCamelCase ( self: str ): """simple docstring""" A__ = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices A__ = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True} add_hook_to_module(model.lineara , AlignDevicesHook(**_snake_case ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**_snake_case ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**_snake_case ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device A__ = torch.device(hook_kwargs["""execution_device"""] ) self.assertEqual(model.batchnorm.running_mean.device , _snake_case ) A__ = torch.randn(2 , 3 ) A__ = model(_snake_case ) self.assertEqual(output.device , _snake_case ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload A__ = { """execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True, """offload_buffers""": True, } add_hook_to_module(model.lineara , AlignDevicesHook(**_snake_case ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**_snake_case ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**_snake_case ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) A__ = torch.randn(2 , 3 ) A__ = model(_snake_case ) self.assertEqual(output.device , _snake_case ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def UpperCamelCase ( self: Union[str, Any] ): """simple docstring""" A__ = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices A__ = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook(_snake_case , execution_device=_snake_case , offload=_snake_case ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device A__ = torch.device(_snake_case ) self.assertEqual(model.batchnorm.running_mean.device , _snake_case ) A__ = torch.randn(2 , 3 ) A__ = model(_snake_case ) self.assertEqual(output.device , _snake_case ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(_snake_case ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook(_snake_case , execution_device=_snake_case , offload=_snake_case , offload_buffers=_snake_case ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) A__ = torch.randn(2 , 3 ) A__ = model(_snake_case ) self.assertEqual(output.device , _snake_case ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(_snake_case ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def UpperCamelCase ( self: Optional[Any] ): """simple docstring""" A__ = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices A__ = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook( _snake_case , execution_device=_snake_case , offload=_snake_case , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device A__ = torch.device(_snake_case ) self.assertEqual(model.batchnorm.running_mean.device , _snake_case ) A__ = torch.randn(2 , 3 ) A__ = model(_snake_case ) self.assertEqual(output.device , _snake_case ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(_snake_case ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook( _snake_case , execution_device=_snake_case , offload=_snake_case , weights_map=model.state_dict() , offload_buffers=_snake_case , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) A__ = torch.randn(2 , 3 ) A__ = model(_snake_case ) self.assertEqual(output.device , _snake_case ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(_snake_case ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
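# Aside: a standalone sketch of the hook mechanism exercised by these tests --
# a pre-forward hook that doubles the input, attached to a bias-free linear
# layer so the scaling commutes with the forward pass. Names are illustrative.
import torch
import torch.nn as nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

class ScaleInputHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] * 2,) + args[1:], kwargs

layer = nn.Linear(3, 3, bias=False)
x = torch.randn(2, 3)
baseline = layer(x)

add_hook_to_module(layer, ScaleInputHook())
assert torch.allclose(layer(x), baseline * 2, atol=1e-6)

remove_hook_from_module(layer)
assert torch.allclose(layer(x), baseline, atol=1e-6)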
357
"""simple docstring""" import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class a ( _lowerCamelCase ): """simple docstring""" def __init__( self: int , UpperCamelCase: str = "▁" , UpperCamelCase: bool = True , UpperCamelCase: Union[str, AddedToken] = "<unk>" , UpperCamelCase: Union[str, AddedToken] = "</s>" , UpperCamelCase: Union[str, AddedToken] = "<pad>" , ): """simple docstring""" A__ = { """pad""": {"""id""": 0, """token""": pad_token}, """eos""": {"""id""": 1, """token""": eos_token}, """unk""": {"""id""": 2, """token""": unk_token}, } A__ = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): A__ = token_dict["""token"""] A__ = Tokenizer(Unigram() ) A__ = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(""" {2,}""" ) , """ """ ), normalizers.Lowercase(), ] ) A__ = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=UpperCamelCase , add_prefix_space=UpperCamelCase ), pre_tokenizers.Digits(individual_digits=UpperCamelCase ), pre_tokenizers.Punctuation(), ] ) A__ = decoders.Metaspace(replacement=UpperCamelCase , add_prefix_space=UpperCamelCase ) A__ = TemplateProcessing( single=f"""$A {self.special_tokens['eos']['token']}""" , special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] , ) A__ = { """model""": """SentencePieceUnigram""", """replacement""": replacement, """add_prefix_space""": add_prefix_space, } super().__init__(UpperCamelCase , UpperCamelCase ) def UpperCamelCase ( self: Tuple , UpperCamelCase: Union[str, List[str]] , UpperCamelCase: int = 80_00 , UpperCamelCase: bool = True , ): """simple docstring""" A__ = trainers.UnigramTrainer( vocab_size=UpperCamelCase , special_tokens=self.special_tokens_list , show_progress=UpperCamelCase , ) if isinstance(UpperCamelCase , UpperCamelCase ): A__ = [files] self._tokenizer.train(UpperCamelCase , trainer=UpperCamelCase ) self.add_unk_id() def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Union[Iterator[str], Iterator[Iterator[str]]] , UpperCamelCase: int = 80_00 , UpperCamelCase: bool = True , ): """simple docstring""" A__ = trainers.UnigramTrainer( vocab_size=UpperCamelCase , special_tokens=self.special_tokens_list , show_progress=UpperCamelCase , ) self._tokenizer.train_from_iterator(UpperCamelCase , trainer=UpperCamelCase ) self.add_unk_id() def UpperCamelCase ( self: List[str] ): """simple docstring""" A__ = json.loads(self._tokenizer.to_str() ) A__ = self.special_tokens["""unk"""]["""id"""] A__ = Tokenizer.from_str(json.dumps(UpperCamelCase ) )
69
0
import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class a_ ( a__ , unittest.TestCase ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BioGptTokenizer __SCREAMING_SNAKE_CASE : List[str] = False def __lowerCAmelCase ( self ) ->Tuple: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE : Any = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] SCREAMING_SNAKE_CASE : List[str] = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE : Optional[int] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) SCREAMING_SNAKE_CASE : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(_lowerCamelCase ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(_lowerCamelCase ) ) def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]: SCREAMING_SNAKE_CASE : Any = '''lower newer''' SCREAMING_SNAKE_CASE : int = '''lower newer''' return input_text, output_text def __lowerCAmelCase ( self ) ->Union[str, Any]: SCREAMING_SNAKE_CASE : str = BioGptTokenizer(self.vocab_file , self.merges_file ) SCREAMING_SNAKE_CASE : List[str] = '''lower''' SCREAMING_SNAKE_CASE : Optional[Any] = ['''low''', '''er</w>'''] SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = tokens + ['''<unk>'''] SCREAMING_SNAKE_CASE : List[Any] = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase ) @slow def __lowerCAmelCase ( self ) ->Any: SCREAMING_SNAKE_CASE : Optional[Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
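# Aside: the fixture above encodes three BPE merges -- "l o" -> "lo",
# "lo w" -> "low", "e r</w>" -> "er</w>" -- so "lower" tokenizes to
# ["low", "er</w>"]. A standalone reconstruction of that setup follows
# (BioGptTokenizer also needs the sacremoses package for Moses pre-tokenization):
import json, os, tempfile
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer

vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>",
         "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>",
         "wider</w>", "<unk>"]
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmp:
    vocab_file = os.path.join(tmp, VOCAB_FILES_NAMES["vocab_file"])
    merges_file = os.path.join(tmp, VOCAB_FILES_NAMES["merges_file"])
    with open(vocab_file, "w") as fp:
        json.dump(dict(zip(vocab, range(len(vocab)))), fp)
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))
    tok = BioGptTokenizer(vocab_file, merges_file)
    assert tok.tokenize("lower") == ["low", "er</w>"]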
313
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a__ : Optional[Any] = logging.get_logger(__name__) a__ : List[str] = { '''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''', '''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''', '''kssteven/ibert-roberta-large-mnli''': ( '''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json''' ), } class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = 'ibert' def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-12 , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase="absolute" , _lowerCamelCase=False , _lowerCamelCase="none" , **_lowerCamelCase , ) ->Any: super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size SCREAMING_SNAKE_CASE : str = hidden_size SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads SCREAMING_SNAKE_CASE : Dict = hidden_act SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE : Tuple = type_vocab_size SCREAMING_SNAKE_CASE : Optional[int] = initializer_range SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps SCREAMING_SNAKE_CASE : str = position_embedding_type SCREAMING_SNAKE_CASE : Optional[int] = quant_mode SCREAMING_SNAKE_CASE : Dict = force_dequant class a_ ( a__ ): """simple docstring""" @property def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": SCREAMING_SNAKE_CASE : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
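# Aside: minimal instantiation of the config above (upstream name: IBertConfig).
# quant_mode switches on the integer-only I-BERT path; force_dequant can opt
# specific ops back out of quantization.
from transformers import IBertConfig

config = IBertConfig(quant_mode=True, force_dequant="none")
print(config.quant_mode, config.num_hidden_layers)  # True 12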
313
1
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class A ( unittest.TestCase ): UpperCamelCase_ : List[Any] =JukeboxTokenizer UpperCamelCase_ : int ={ '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def _A (self ): import torch __lowercase= JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' ) __lowercase= tokenizer(**self.metas )['input_ids'] # fmt: off __lowercase= [ torch.tensor([[ 0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7, 7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2, 4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5, 3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6, 2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5, 4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6, 4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3, 7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8, 2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4, 4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4, 7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1, 3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6, 4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9, 3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1, 7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4, 4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9, 4_1, 4_4, 3_0, 4_5, 
7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6, 4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3, 7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6, 4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4, 7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6, 2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7, 3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8, 2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0, 7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5, 7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4, 7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6]] ), torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ), torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def _A (self ): import torch __lowercase= JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' ) __lowercase= tokenizer(**self.metas )['input_ids'] # fmt: off __lowercase= [ torch.tensor([[ 0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9, 3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7, 4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1, 7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8, 2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1, 3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7, 4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7, 7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5, 6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1, 4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7, 3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1, 3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9, 4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5, 3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4, 3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7, 4_4, 
4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7, 3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2, 3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2, 3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7, 1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7, 1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2, 4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7, 4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1, 7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5, 2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7]] ), torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
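# Aside: the call shape exercised above, in brief -- the tokenizer returns one
# token sequence per Jukebox prior (three in total). Fetching the vocab needs
# network access, so this is a sketch.
from transformers import JukeboxTokenizer

tok = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
ids = tok(artist="Zac Brown Band", genres="Country", lyrics="one line")["input_ids"]
print(len(ids))  # 3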
304
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class A ( A_ ): UpperCamelCase_ : torch.FloatTensor UpperCamelCase_ : torch.FloatTensor class A ( A_ , A_ ): UpperCamelCase_ : Dict =1 @register_to_config def __init__(self , lowerCAmelCase = 2_0_0_0 , lowerCAmelCase = 0.15 , lowerCAmelCase = 0.01 , lowerCAmelCase = 13_48.0 , lowerCAmelCase = 1E-5 , lowerCAmelCase = 1 , ): # standard deviation of the initial noise distribution __lowercase= sigma_max # setable values __lowercase= None self.set_sigmas(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase = None ): return sample def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None ): __lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps __lowercase= torch.linspace(1 , lowerCAmelCase , lowerCAmelCase , device=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None ): __lowercase= sigma_min if sigma_min is not None else self.config.sigma_min __lowercase= sigma_max if sigma_max is not None else self.config.sigma_max __lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(lowerCAmelCase , lowerCAmelCase ) __lowercase= sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) __lowercase= torch.exp(torch.linspace(math.log(lowerCAmelCase ) , math.log(lowerCAmelCase ) , lowerCAmelCase ) ) __lowercase= torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] ) def _A (self , lowerCAmelCase , lowerCAmelCase ): return torch.where( timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ): if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' ) __lowercase= timestep * torch.ones( sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0]) __lowercase= (timestep * (len(self.timesteps ) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda __lowercase= timesteps.to(self.discrete_sigmas.device ) __lowercase= self.discrete_sigmas[timesteps].to(sample.device ) __lowercase= self.get_adjacent_sigma(lowerCAmelCase , lowerCAmelCase ).to(sample.device ) __lowercase= torch.zeros_like(lowerCAmelCase ) __lowercase= (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE models to ancestral sampling methods __lowercase= diffusion.flatten() while len(diffusion.shape ) < len(sample.shape ): __lowercase= diffusion.unsqueeze(-1 ) __lowercase= drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of __lowercase= randn_tensor( sample.shape , layout=sample.layout , generator=lowerCAmelCase , device=sample.device , dtype=sample.dtype ) __lowercase= sample - drift # subtract 
because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? __lowercase= prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=lowerCAmelCase , prev_sample_mean=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ): if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' ) # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction __lowercase= randn_tensor(sample.shape , layout=sample.layout , generator=lowerCAmelCase ).to(sample.device ) # compute step size from the model_output, the noise, and the snr __lowercase= torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean() __lowercase= torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean() __lowercase= (self.config.snr * noise_norm / grad_norm) ** 2 * 2 __lowercase= step_size * torch.ones(sample.shape[0] ).to(sample.device ) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term __lowercase= step_size.flatten() while len(step_size.shape ) < len(sample.shape ): __lowercase= step_size.unsqueeze(-1 ) __lowercase= sample + step_size * model_output __lowercase= prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples __lowercase= timesteps.to(original_samples.device ) __lowercase= self.discrete_sigmas.to(original_samples.device )[timesteps] __lowercase= ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None] ) __lowercase= noise + original_samples return noisy_samples def __len__(self ): return self.config.num_train_timesteps
304
1
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer lowercase__ : Union[str, Any] = logging.get_logger(__name__) lowercase__ : Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} lowercase__ : Optional[int] = { "vocab_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json", "roberta-large-openai-detector": ( "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json" ), }, "merges_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt", "roberta-large-openai-detector": ( "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt" ), }, "tokenizer_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json", "roberta-base-openai-detector": ( "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json" ), "roberta-large-openai-detector": ( "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json" ), }, } lowercase__ : Optional[Any] = { "roberta-base": 5_1_2, "roberta-large": 5_1_2, "roberta-large-mnli": 5_1_2, "distilroberta-base": 5_1_2, "roberta-base-openai-detector": 5_1_2, "roberta-large-openai-detector": 5_1_2, } class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case = ['input_ids', 'attention_mask'] _snake_case = RobertaTokenizer def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="replace" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , )-> List[Any]: '''simple docstring''' super().__init__( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , 
cls_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) __UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE_ ) != add_prefix_space: __UpperCamelCase = getattr(SCREAMING_SNAKE_CASE_ , pre_tok_state.pop('''type''' ) ) __UpperCamelCase = add_prefix_space __UpperCamelCase = pre_tok_class(**SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = add_prefix_space __UpperCamelCase = '''post_processor''' __UpperCamelCase = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if tokenizer_component_instance: __UpperCamelCase = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: __UpperCamelCase = tuple(state['''sep'''] ) if "cls" in state: __UpperCamelCase = tuple(state['''cls'''] ) __UpperCamelCase = False if state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE_ ) != add_prefix_space: __UpperCamelCase = add_prefix_space __UpperCamelCase = True if state.get('''trim_offsets''' , SCREAMING_SNAKE_CASE_ ) != trim_offsets: __UpperCamelCase = trim_offsets __UpperCamelCase = True if changes_to_apply: __UpperCamelCase = getattr(SCREAMING_SNAKE_CASE_ , state.pop('''type''' ) ) __UpperCamelCase = component_class(**SCREAMING_SNAKE_CASE_ ) setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @property def A__ ( self )-> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def A__ ( self , SCREAMING_SNAKE_CASE_ )-> List[str]: '''simple docstring''' __UpperCamelCase = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else value __UpperCamelCase = value def A__ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> BatchEncoding: '''simple docstring''' __UpperCamelCase = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE_ ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> BatchEncoding: '''simple docstring''' __UpperCamelCase = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE_ ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._encode_plus(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None )-> Tuple[str]: '''simple docstring''' __UpperCamelCase = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ ) return tuple(SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None )-> Optional[Any]: '''simple docstring''' __UpperCamelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None )-> List[int]: '''simple docstring''' __UpperCamelCase = [self.sep_token_id] __UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
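# Aside: the pair layout produced by build_inputs_with_special_tokens above --
# RoBERTa wraps pairs as <s> A </s></s> B </s>. Fetching the vocab needs network
# access; the token ids are arbitrary examples.
from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained("roberta-base")
ids = tok.build_inputs_with_special_tokens([100, 200], [300])
print(tok.convert_ids_to_tokens(ids))  # ['<s>', ..., '</s>', '</s>', ..., '</s>']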
328
def binary_insertion_sort(collection: list) -> list:
    """Sort `collection` in place with insertion sort, using binary search to
    locate each element's insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary search for the insertion index of `val` in collection[:i].
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift the tail right by one and drop `val` into place.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
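# Quick non-interactive sanity checks for binary_insertion_sort above
# (illustrative sketch only).
assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]
assert binary_insertion_sort([]) == []
assert binary_insertion_sort(["d", "a", "c", "b"]) == ["a", "b", "c", "d"]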
328
1
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self ): __a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __a = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_a ) __a = -1 __a = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_a ) __a = model.generate(_a , max_new_tokens=10 , do_sample=_a ) __a = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: __a = TextStreamer(_a ) model.generate(_a , max_new_tokens=10 , do_sample=_a , streamer=_a ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __a = cs.out[:-1] self.assertEqual(_a , _a ) def __UpperCAmelCase ( self ): __a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __a = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_a ) __a = -1 __a = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_a ) __a = model.generate(_a , max_new_tokens=10 , do_sample=_a ) __a = tokenizer.decode(greedy_ids[0] ) __a = TextIteratorStreamer(_a ) __a = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} __a = Thread(target=model.generate , kwargs=_a ) thread.start() __a = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(_a , _a ) def __UpperCAmelCase ( self ): __a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __a = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_a ) __a = -1 __a = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_a ) __a = model.generate(_a , max_new_tokens=10 , do_sample=_a ) __a = greedy_ids[:, input_ids.shape[1] :] __a = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: __a = TextStreamer(_a , skip_prompt=_a ) model.generate(_a , max_new_tokens=10 , do_sample=_a , streamer=_a ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __a = cs.out[:-1] self.assertEqual(_a , _a ) def __UpperCAmelCase ( self ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them __a = AutoTokenizer.from_pretrained('''distilgpt2''' ) __a = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_a ) __a = -1 __a = torch.ones((1, 5) , device=_a ).long() * model.config.bos_token_id with CaptureStdout() as cs: __a = TextStreamer(_a , skip_special_tokens=_a ) model.generate(_a , max_new_tokens=1 , do_sample=_a , streamer=_a ) # The prompt contains a special token, so the streamer should not print it. 
As such, the output text, when # re-tokenized, must only contain one token __a = cs.out[:-1] # Remove the final "\n" __a = tokenizer(_a , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def __UpperCAmelCase ( self ): __a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __a = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_a ) __a = -1 __a = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_a ) __a = TextIteratorStreamer(_a , timeout=0.001 ) __a = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} __a = Thread(target=model.generate , kwargs=_a ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_a ): __a = '''''' for new_text in streamer: streamer_text += new_text
11
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any]=0.9_99 , lowerCAmelCase__ : List[str]="cosine" , ) -> Optional[int]: if alpha_transform_type == "cosine": def alpha_bar_fn(lowerCAmelCase__ : int ): return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(lowerCAmelCase__ : Optional[Any] ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' ) __a = [] for i in range(lowerCAmelCase__ ): __a = i / num_diffusion_timesteps __a = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(lowerCAmelCase__ ) / alpha_bar_fn(lowerCAmelCase__ ) , lowerCAmelCase__ ) ) return torch.tensor(lowerCAmelCase__ , dtype=torch.floataa ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCAmelCase : Tuple = [e.name for e in KarrasDiffusionSchedulers] __UpperCAmelCase : str = 2 @register_to_config def __init__( self , _a = 1_000 , _a = 0.0_0085 , _a = 0.012 , _a = "linear" , _a = None , _a = "epsilon" , _a = "linspace" , _a = 0 , ): if trained_betas is not None: __a = torch.tensor(_a , dtype=torch.floataa ) elif beta_schedule == "linear": __a = torch.linspace(_a , _a , _a , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __a = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , _a , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __a = betas_for_alpha_bar(_a ) else: raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' ) __a = 1.0 - self.betas __a = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(_a , _a , _a ) def __UpperCAmelCase ( self , _a , _a=None ): if schedule_timesteps is None: __a = self.timesteps __a = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. for image-to-image) if len(self._index_counter ) == 0: __a = 1 if len(_a ) > 1 else 0 else: __a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep __a = self._index_counter[timestep_int] return indices[pos].item() @property def __UpperCAmelCase ( self ): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def __UpperCAmelCase ( self , _a , _a , ): __a = self.index_for_timestep(_a ) if self.state_in_first_order: __a = self.sigmas[step_index] else: __a = self.sigmas_interpol[step_index] __a = sample / ((sigma**2 + 1) ** 0.5) return sample def __UpperCAmelCase ( self , _a , _a = None , _a = None , ): __a = num_inference_steps __a = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" correspond to annotation of Table 2.
of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": __a = np.linspace(0 , num_train_timesteps - 1 , _a , dtype=_a )[::-1].copy() elif self.config.timestep_spacing == "leading": __a = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __a = (np.arange(0 , _a ) * step_ratio).round()[::-1].copy().astype(_a ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": __a = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __a = (np.arange(_a , 0 , -step_ratio )).round().copy().astype(_a ) timesteps -= 1 else: raise ValueError( f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' ) __a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) __a = torch.from_numpy(np.log(_a ) ).to(_a ) __a = np.interp(_a , np.arange(0 , len(_a ) ) , _a ) __a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) __a = torch.from_numpy(_a ).to(device=_a ) # interpolate sigmas __a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp() __a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) __a = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(_a ).startswith('''mps''' ): # mps does not support float64 __a = torch.from_numpy(_a ).to(_a , dtype=torch.floataa ) else: __a = torch.from_numpy(_a ).to(_a ) # interpolate timesteps __a = self.sigma_to_t(_a ).to(_a , dtype=timesteps.dtype ) __a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten() __a = torch.cat([timesteps[:1], interleaved_timesteps] ) __a = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter __a = defaultdict(_a ) def __UpperCAmelCase ( self , _a ): # get log sigma __a = sigma.log() # get distribution __a = log_sigma - self.log_sigmas[:, None] # get sigmas range __a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) __a = low_idx + 1 __a = self.log_sigmas[low_idx] __a = self.log_sigmas[high_idx] # interpolate sigmas __a = (low - log_sigma) / (low - high) __a = w.clamp(0 , 1 ) # transform interpolation to time range __a = (1 - w) * low_idx + w * high_idx __a = t.view(sigma.shape ) return t @property def __UpperCAmelCase ( self ): return self.sample is None def __UpperCAmelCase ( self , _a , _a , _a , _a = True , ): __a = self.index_for_timestep(_a ) # advance index counter by 1 __a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: __a = self.sigmas[step_index] __a = self.sigmas_interpol[step_index + 1] __a = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method __a = self.sigmas[step_index - 1] __a = self.sigmas_interpol[step_index] __a = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API __a = 0 __a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": __a = sigma_hat if self.state_in_first_order else sigma_interpol __a = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": __a = sigma_hat if self.state_in_first_order else sigma_interpol __a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError('''prediction_type not implemented yet: sample''' ) else: raise ValueError( f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order __a = (sample - pred_original_sample) / sigma_hat # 3. delta timestep __a = sigma_interpol - sigma_hat # store for 2nd order step __a = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order __a = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep __a = sigma_next - sigma_hat __a = self.sample __a = None __a = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=_a ) def __UpperCAmelCase ( self , _a , _a , _a , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples __a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(_a ): # mps does not support float64 __a = self.timesteps.to(original_samples.device , dtype=torch.floataa ) __a = timesteps.to(original_samples.device , dtype=torch.floataa ) else: __a = self.timesteps.to(original_samples.device ) __a = timesteps.to(original_samples.device ) __a = [self.index_for_timestep(_a , _a ) for t in timesteps] __a = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): __a = sigma.unsqueeze(-1 ) __a = original_samples + noise * sigma return noisy_samples def __len__( self ): return self.config.num_train_timesteps
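# Illustrative sketch of the cosine schedule computed by the
# `betas_for_alpha_bar`-style helper above, written with plain names
# (`cosine_betas` and `max_beta` are hypothetical; 0.999 mirrors the
# record's default cap).
import math

def cosine_betas(num_steps, max_beta=0.999):
    def alpha_bar(t):
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_steps):
        t1, t2 = i / num_steps, (i + 1) / num_steps
        # Each beta is the relative drop in alpha_bar over one step, capped.
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return betas

betas = cosine_betas(1000)
assert betas[0] < betas[-1] <= 0.999  # betas rise toward the cap late in the schedule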
11
1
'''simple docstring''' import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_=1 )-> Union[str, Any]: '''simple docstring''' if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_=0 )-> Optional[Any]: '''simple docstring''' _UpperCAmelCase : int = [] for old_item in old_list: _UpperCAmelCase : str = old_item.replace("""in_layers.0""" , """norm1""" ) _UpperCAmelCase : int = new_item.replace("""in_layers.2""" , """conv1""" ) _UpperCAmelCase : int = new_item.replace("""out_layers.0""" , """norm2""" ) _UpperCAmelCase : Any = new_item.replace("""out_layers.3""" , """conv2""" ) _UpperCAmelCase : Union[str, Any] = new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) _UpperCAmelCase : Dict = new_item.replace("""skip_connection""" , """conv_shortcut""" ) _UpperCAmelCase : Dict = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_=0 )-> Dict: '''simple docstring''' _UpperCAmelCase : Optional[int] = [] for old_item in old_list: _UpperCAmelCase : Any = old_item _UpperCAmelCase : Tuple = new_item.replace("""norm.weight""" , """group_norm.weight""" ) _UpperCAmelCase : Any = new_item.replace("""norm.bias""" , """group_norm.bias""" ) _UpperCAmelCase : Dict = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) _UpperCAmelCase : List[Any] = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) _UpperCAmelCase : List[Any] = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None )-> List[Any]: '''simple docstring''' assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _UpperCAmelCase : Union[str, Any] = old_checkpoint[path] _UpperCAmelCase : Optional[int] = old_tensor.shape[0] // 3 _UpperCAmelCase : Optional[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) _UpperCAmelCase : List[Any] = old_tensor.shape[0] // config["""num_head_channels"""] // 3 _UpperCAmelCase : Tuple = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : List[Any] = old_tensor.split(channels // num_heads , dim=1 ) _UpperCAmelCase : Any = query.reshape(lowerCAmelCase_ ) _UpperCAmelCase : Any = key.reshape(lowerCAmelCase_ ) _UpperCAmelCase : Union[str, Any] = value.reshape(lowerCAmelCase_ ) for path in paths: _UpperCAmelCase : Tuple = path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _UpperCAmelCase : Any = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) _UpperCAmelCase : int = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) _UpperCAmelCase : int = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: _UpperCAmelCase : Any = new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _UpperCAmelCase : str = old_checkpoint[path["""old"""]][:, :, 0] else: _UpperCAmelCase : Optional[int] = old_checkpoint[path["""old"""]] def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> int: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = {} _UpperCAmelCase : Tuple = checkpoint["""time_embed.0.weight"""] _UpperCAmelCase : List[str] = checkpoint["""time_embed.0.bias"""] _UpperCAmelCase : Optional[int] = checkpoint["""time_embed.2.weight"""] _UpperCAmelCase : Union[str, Any] = checkpoint["""time_embed.2.bias"""] _UpperCAmelCase : Any = checkpoint["""input_blocks.0.0.weight"""] _UpperCAmelCase : int = checkpoint["""input_blocks.0.0.bias"""] _UpperCAmelCase : str = checkpoint["""out.0.weight"""] _UpperCAmelCase : Optional[int] = checkpoint["""out.0.bias"""] _UpperCAmelCase : str = checkpoint["""out.2.weight"""] _UpperCAmelCase : Dict = checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only _UpperCAmelCase : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) _UpperCAmelCase : int = { layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key] for layer_id in range(lowerCAmelCase_ ) } # Retrieves the keys for the middle blocks only _UpperCAmelCase : Optional[Any] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) _UpperCAmelCase : int = { layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key] for layer_id in range(lowerCAmelCase_ ) } # Retrieves the keys for the output blocks only _UpperCAmelCase : List[Any] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) _UpperCAmelCase : Optional[Any] = { layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key] for layer_id in range(lowerCAmelCase_ ) } for i in range(1 , lowerCAmelCase_ ): _UpperCAmelCase : Any = (i - 1) // (config["""num_res_blocks"""] + 
1) _UpperCAmelCase : Any = (i - 1) % (config["""num_res_blocks"""] + 1) _UpperCAmelCase : Any = [key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key] _UpperCAmelCase : Any = [key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key] if F'''input_blocks.{i}.0.op.weight''' in checkpoint: _UpperCAmelCase : Dict = checkpoint[ F'''input_blocks.{i}.0.op.weight''' ] _UpperCAmelCase : Tuple = checkpoint[ F'''input_blocks.{i}.0.op.bias''' ] continue _UpperCAmelCase : Optional[Any] = renew_resnet_paths(lowerCAmelCase_ ) _UpperCAmelCase : Optional[Any] = {"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''} _UpperCAmelCase : Optional[int] = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path, resnet_op] , config=lowerCAmelCase_ ) if len(lowerCAmelCase_ ): _UpperCAmelCase : Optional[Any] = renew_attention_paths(lowerCAmelCase_ ) _UpperCAmelCase : Optional[Any] = { """old""": F'''input_blocks.{i}.1''', """new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''', } _UpperCAmelCase : int = { F'''input_blocks.{i}.1.qkv.bias''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''input_blocks.{i}.1.qkv.weight''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ , ) _UpperCAmelCase : int = middle_blocks[0] _UpperCAmelCase : Dict = middle_blocks[1] _UpperCAmelCase : Optional[int] = middle_blocks[2] _UpperCAmelCase : Tuple = renew_resnet_paths(lowerCAmelCase_ ) assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ ) _UpperCAmelCase : int = renew_resnet_paths(lowerCAmelCase_ ) assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ ) _UpperCAmelCase : str = renew_attention_paths(lowerCAmelCase_ ) _UpperCAmelCase : List[Any] = { """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ ) for i in range(lowerCAmelCase_ ): _UpperCAmelCase : List[str] = i // (config["""num_res_blocks"""] + 1) _UpperCAmelCase : str = i % (config["""num_res_blocks"""] + 1) _UpperCAmelCase : List[Any] = [shave_segments(lowerCAmelCase_ , 2 ) for name in output_blocks[i]] _UpperCAmelCase : Optional[int] = {} for layer in output_block_layers: _UpperCAmelCase ,_UpperCAmelCase : Tuple = layer.split(""".""" )[0], shave_segments(lowerCAmelCase_ , 1 ) if layer_id in 
output_block_list: output_block_list[layer_id].append(lowerCAmelCase_ ) else: _UpperCAmelCase : Optional[int] = [layer_name] if len(lowerCAmelCase_ ) > 1: _UpperCAmelCase : Tuple = [key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key] _UpperCAmelCase : Any = [key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key] _UpperCAmelCase : Dict = renew_resnet_paths(lowerCAmelCase_ ) _UpperCAmelCase : Union[str, Any] = renew_resnet_paths(lowerCAmelCase_ ) _UpperCAmelCase : Dict = {"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''} assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , config=lowerCAmelCase_ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _UpperCAmelCase : List[Any] = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) _UpperCAmelCase : Union[str, Any] = checkpoint[ F'''output_blocks.{i}.{index}.conv.weight''' ] _UpperCAmelCase : str = checkpoint[ F'''output_blocks.{i}.{index}.conv.bias''' ] # Clear attentions as they have been attributed above. if len(lowerCAmelCase_ ) == 2: _UpperCAmelCase : Any = [] if len(lowerCAmelCase_ ): _UpperCAmelCase : Union[str, Any] = renew_attention_paths(lowerCAmelCase_ ) _UpperCAmelCase : Tuple = { """old""": F'''output_blocks.{i}.1''', """new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''', } _UpperCAmelCase : Dict = { F'''output_blocks.{i}.1.qkv.bias''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''output_blocks.{i}.1.qkv.weight''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=lowerCAmelCase_ , ) else: _UpperCAmelCase : List[Any] = renew_resnet_paths(lowerCAmelCase_ , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _UpperCAmelCase : Optional[Any] = """.""".join(["""output_blocks""", str(lowerCAmelCase_ ), path["""old"""]] ) _UpperCAmelCase : str = """.""".join(["""up_blocks""", str(lowerCAmelCase_ ), """resnets""", str(lowerCAmelCase_ ), path["""new"""]] ) _UpperCAmelCase : Optional[Any] = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": A_ : Tuple = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the architecture.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") A_ : List[str] = parser.parse_args() A_ : Any = torch.load(args.checkpoint_path) with open(args.config_file) as f: A_ : Union[str, Any] = json.loads(f.read()) A_ : List[Any] = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] A_ : List[str] = UNetaDModel(**config) 
model.load_state_dict(converted_checkpoint) try: A_ : Union[str, Any] = DDPMScheduler.from_config("""/""".join(args.checkpoint_path.split("""/""")[:-1])) A_ : str = VQModel.from_pretrained("""/""".join(args.checkpoint_path.split("""/""")[:-1])) A_ : Any = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
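# Illustrative sketch of the fused-qkv split performed inside
# `assign_to_checkpoint` above, with plain names; `num_heads` is passed
# directly here, whereas the record derives it from config["num_head_channels"].
import torch

def split_qkv(old_tensor, num_heads):
    channels = old_tensor.shape[0] // 3
    target_shape = (-1, channels) if old_tensor.ndim == 3 else (-1,)
    # Group each head's fused q/k/v rows together, then split along that axis.
    heads = old_tensor.reshape((num_heads, 3 * channels // num_heads) + tuple(old_tensor.shape[1:]))
    query, key, value = heads.split(channels // num_heads, dim=1)
    return query.reshape(target_shape), key.reshape(target_shape), value.reshape(target_shape)

q, k, v = split_qkv(torch.randn(3 * 64, 64, 1), num_heads=4)  # conv-style qkv weight
assert q.shape == k.shape == v.shape == (64, 64)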
215
'''simple docstring''' import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class lowercase ( unittest.TestCase ): """simple docstring""" def _snake_case ( self ) -> Optional[Any]: _UpperCAmelCase : str = """laion/clap-htsat-unfused""" _UpperCAmelCase : int = tempfile.mkdtemp() def _snake_case ( self ,**a_ ) -> str: return RobertaTokenizer.from_pretrained(self.checkpoint ,**a_ ) def _snake_case ( self ,**a_ ) -> Tuple: return ClapFeatureExtractor.from_pretrained(self.checkpoint ,**a_ ) def _snake_case ( self ) -> int: shutil.rmtree(self.tmpdirname ) def _snake_case ( self ) -> Optional[int]: _UpperCAmelCase : str = self.get_tokenizer() _UpperCAmelCase : Any = self.get_feature_extractor() _UpperCAmelCase : int = ClapProcessor(tokenizer=a_ ,feature_extractor=a_ ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase : List[Any] = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer ,a_ ) self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor ,a_ ) def _snake_case ( self ) -> List[Any]: _UpperCAmelCase : int = ClapProcessor(tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase : List[str] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) _UpperCAmelCase : List[Any] = self.get_feature_extractor(do_normalize=a_ ,padding_value=1.0 ) _UpperCAmelCase : Optional[Any] = ClapProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=a_ ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,a_ ) self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor ,a_ ) def _snake_case ( self ) -> str: _UpperCAmelCase : Tuple = self.get_feature_extractor() _UpperCAmelCase : Dict = self.get_tokenizer() _UpperCAmelCase : str = ClapProcessor(tokenizer=a_ ,feature_extractor=a_ ) _UpperCAmelCase : Tuple = floats_list((3, 1_000) ) _UpperCAmelCase : int = feature_extractor(a_ ,return_tensors="""np""" ) _UpperCAmelCase : Union[str, Any] = processor(audios=a_ ,return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def _snake_case ( self ) -> Tuple: _UpperCAmelCase : List[Any] = self.get_feature_extractor() _UpperCAmelCase : Any = self.get_tokenizer() _UpperCAmelCase : Optional[int] = ClapProcessor(tokenizer=a_ ,feature_extractor=a_ ) _UpperCAmelCase : Union[str, Any] = """This is a test string""" _UpperCAmelCase : Optional[Any] = processor(text=a_ ) _UpperCAmelCase : Any = tokenizer(a_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _snake_case ( self ) -> List[Any]: _UpperCAmelCase : str = self.get_feature_extractor() _UpperCAmelCase : List[str] = self.get_tokenizer() _UpperCAmelCase : Any = ClapProcessor(tokenizer=a_ ,feature_extractor=a_ ) 
_UpperCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCAmelCase : Dict = processor.batch_decode(a_ ) _UpperCAmelCase : Any = tokenizer.batch_decode(a_ ) self.assertListEqual(a_ ,a_ ) def _snake_case ( self ) -> Dict: _UpperCAmelCase : List[str] = self.get_feature_extractor() _UpperCAmelCase : int = self.get_tokenizer() _UpperCAmelCase : Dict = ClapProcessor(tokenizer=a_ ,feature_extractor=a_ ) self.assertListEqual( processor.model_input_names[2:] ,feature_extractor.model_input_names ,msg="""`processor` and `feature_extractor` model input names do not match""" ,)
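# Illustrative sketch (hypothetical `MiniProcessor`, not the real ClapProcessor)
# of the delegation contract the tests above verify: text goes to the tokenizer,
# audio to the feature extractor, and batch_decode back to the tokenizer.
class MiniProcessor:
    def __init__(self, tokenizer, feature_extractor):
        self.tokenizer = tokenizer
        self.feature_extractor = feature_extractor

    def __call__(self, text=None, audios=None, **kwargs):
        if text is not None:
            return self.tokenizer(text, **kwargs)
        return self.feature_extractor(audios, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)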
215
1
"""simple docstring""" import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = CanineTokenizer SCREAMING_SNAKE_CASE_ = False def a_ ( self) -> Dict: super().setUp() snake_case_ = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname) @cached_property def a_ ( self) -> Optional[Any]: return CanineTokenizer.from_pretrained('google/canine-s') def a_ ( self, **lowerCAmelCase__) -> CanineTokenizer: snake_case_ = self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCAmelCase__) snake_case_ = 1024 return tokenizer @require_torch def a_ ( self) -> int: snake_case_ = self.canine_tokenizer snake_case_ = ['Life is like a box of chocolates.', 'You never know what you\'re gonna get.'] # fmt: off snake_case_ = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0] # fmt: on snake_case_ = tokenizer(lowerCAmelCase__, padding=lowerCAmelCase__, return_tensors='pt') self.assertIsInstance(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = list(batch.input_ids.numpy()[0]) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) self.assertEqual((2, 39), batch.input_ids.shape) self.assertEqual((2, 39), batch.attention_mask.shape) @require_torch def a_ ( self) -> Any: snake_case_ = self.canine_tokenizer snake_case_ = ['Once there was a man.', 'He wrote a test in HuggingFace Transformers.'] snake_case_ = tokenizer(lowerCAmelCase__, padding=lowerCAmelCase__, return_tensors='pt') # check if input_ids, attention_mask and token_type_ids are returned self.assertIn('input_ids', lowerCAmelCase__) self.assertIn('attention_mask', lowerCAmelCase__) self.assertIn('token_type_ids', lowerCAmelCase__) @require_torch def a_ ( self) -> List[str]: snake_case_ = self.canine_tokenizer snake_case_ = [ 'What\'s the weather?', 'It\'s about 25 degrees.', ] snake_case_ = tokenizer( text_target=lowerCAmelCase__, max_length=32, padding='max_length', truncation=lowerCAmelCase__, return_tensors='pt') self.assertEqual(32, targets['input_ids'].shape[1]) def a_ ( self) -> Optional[Any]: # safety check on max_len default value so we are sure the test works snake_case_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}'): self.assertNotEqual(tokenizer.model_max_length, 42) # Now let's start the test snake_case_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}'): # Isolate this from the other tests because we save additional tokens/etc snake_case_ = tempfile.mkdtemp() snake_case_ = ' He is very happy, UNwant\u00E9d,running' snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) tokenizer.save_pretrained(lowerCAmelCase__) snake_case_ = tokenizer.__class__.from_pretrained(lowerCAmelCase__) snake_case_ = after_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) shutil.rmtree(lowerCAmelCase__) snake_case_ = self.get_tokenizers(model_max_length=42) for tokenizer in tokenizers: with
self.subTest(f'{tokenizer.__class__.__name__}'): # Isolate this from the other tests because we save additional tokens/etc snake_case_ = tempfile.mkdtemp() snake_case_ = ' He is very happy, UNwant\u00E9d,running' snake_case_ = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: snake_case_ = chr(0xe_007) additional_special_tokens.append(lowerCAmelCase__) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens}) snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) tokenizer.save_pretrained(lowerCAmelCase__) snake_case_ = tokenizer.__class__.from_pretrained(lowerCAmelCase__) snake_case_ = after_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) self.assertIn(lowerCAmelCase__, after_tokenizer.additional_special_tokens) self.assertEqual(after_tokenizer.model_max_length, 42) snake_case_ = tokenizer.__class__.from_pretrained(lowerCAmelCase__, model_max_length=43) self.assertEqual(tokenizer.model_max_length, 43) shutil.rmtree(lowerCAmelCase__) def a_ ( self) -> str: snake_case_ = self.get_tokenizers(do_lower_case=lowerCAmelCase__) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}'): snake_case_ , snake_case_ = self.get_clean_sequence(lowerCAmelCase__) # a special token for Canine can be defined as follows: snake_case_ = 0xe_005 snake_case_ = chr(lowerCAmelCase__) tokenizer.add_special_tokens({'cls_token': special_token}) snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertEqual(len(lowerCAmelCase__), 1) snake_case_ = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=lowerCAmelCase__) snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertEqual(lowerCAmelCase__, input_encoded + special_token_id) snake_case_ = tokenizer.decode(lowerCAmelCase__, skip_special_tokens=lowerCAmelCase__) self.assertTrue(special_token not in decoded) def a_ ( self) -> Union[str, Any]: snake_case_ = self.get_tokenizers(do_lower_case=lowerCAmelCase__) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}'): snake_case_ = chr(0xe_005) snake_case_ = chr(0xe_006) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=lowerCAmelCase__) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. 
(in tokenization_utils_base.py) tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]}) snake_case_ = tokenizer.tokenize(lowerCAmelCase__) snake_case_ = tokenizer.tokenize(lowerCAmelCase__) self.assertEqual(len(lowerCAmelCase__), 1) self.assertEqual(len(lowerCAmelCase__), 1) self.assertEqual(token_a[0], lowerCAmelCase__) self.assertEqual(token_a[0], lowerCAmelCase__) @require_tokenizers def a_ ( self) -> Tuple: snake_case_ = self.get_tokenizers(do_lower_case=lowerCAmelCase__) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}'): # a special token for Canine can be defined as follows: snake_case_ = 0xe_006 snake_case_ = chr(lowerCAmelCase__) snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__) tokenizer.add_special_tokens({'additional_special_tokens': [new_token]}) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(lowerCAmelCase__) tokenizer.from_pretrained(lowerCAmelCase__) def a_ ( self) -> int: snake_case_ = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer())) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer())) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(lowerCAmelCase__) with open(os.path.join(lowerCAmelCase__, 'special_tokens_map.json'), encoding='utf-8') as json_file: snake_case_ = json.load(lowerCAmelCase__) with open(os.path.join(lowerCAmelCase__, 'tokenizer_config.json'), encoding='utf-8') as json_file: snake_case_ = json.load(lowerCAmelCase__) # a special token for Canine can be defined as follows: snake_case_ = 0xe_006 snake_case_ = chr(lowerCAmelCase__) snake_case_ = [new_token_a] snake_case_ = [new_token_a] with open(os.path.join(lowerCAmelCase__, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile: json.dump(lowerCAmelCase__, lowerCAmelCase__) with open(os.path.join(lowerCAmelCase__, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile: json.dump(lowerCAmelCase__, lowerCAmelCase__) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files snake_case_ = tokenizer_class.from_pretrained(lowerCAmelCase__, extra_ids=0) self.assertIn(lowerCAmelCase__, tokenizer_without_change_in_init.additional_special_tokens) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a])), ) snake_case_ = 0xe_007 snake_case_ = chr(lowerCAmelCase__) # Now we test that we can change the value of additional_special_tokens in the from_pretrained snake_case_ = [AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__)] snake_case_ = tokenizer_class.from_pretrained( lowerCAmelCase__, additional_special_tokens=lowerCAmelCase__, extra_ids=0) self.assertIn(lowerCAmelCase__, tokenizer.additional_special_tokens) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a]))) @require_tokenizers def a_ ( self) -> Union[str, Any]: snake_case_ = self.get_tokenizers(do_lower_case=lowerCAmelCase__) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}'): snake_case_ = 'hello world' if self.space_between_special_tokens: snake_case_ = '[CLS] hello world [SEP]' else: snake_case_ = input snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) snake_case_ = tokenizer.decode(lowerCAmelCase__, spaces_between_special_tokens=self.space_between_special_tokens) self.assertIn(lowerCAmelCase__, [output, output.lower()]) def a_ ( self) -> Dict: snake_case_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}'): snake_case_ = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] snake_case_ = 'a' snake_case_ = ord(lowerCAmelCase__) for attr in attributes_list: setattr(lowerCAmelCase__, attr + '_id', lowerCAmelCase__) self.assertEqual(getattr(lowerCAmelCase__, lowerCAmelCase__), lowerCAmelCase__) self.assertEqual(getattr(lowerCAmelCase__, attr + '_id'), lowerCAmelCase__) setattr(lowerCAmelCase__, attr + '_id', lowerCAmelCase__) self.assertEqual(getattr(lowerCAmelCase__, lowerCAmelCase__), lowerCAmelCase__) self.assertEqual(getattr(lowerCAmelCase__, attr + '_id'), lowerCAmelCase__) setattr(lowerCAmelCase__, 'additional_special_tokens_ids', []) self.assertListEqual(getattr(lowerCAmelCase__, 'additional_special_tokens'), []) self.assertListEqual(getattr(lowerCAmelCase__, 'additional_special_tokens_ids'), []) snake_case_ = 0xe_006 snake_case_ = chr(lowerCAmelCase__) setattr(lowerCAmelCase__, 'additional_special_tokens_ids', [additional_special_token_id]) self.assertListEqual(getattr(lowerCAmelCase__, 'additional_special_tokens'), [additional_special_token]) self.assertListEqual(getattr(lowerCAmelCase__, 'additional_special_tokens_ids'), [additional_special_token_id]) def a_ ( self) -> List[Any]: pass def a_ ( self) -> List[Any]: pass def a_ ( self) -> Optional[int]: pass def a_ ( self) -> Tuple: pass def a_ ( self) -> Tuple: pass def a_ ( self) -> int: pass def a_ ( self) -> Optional[int]: pass def a_ ( self) -> int: pass
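# Illustrative sketch: CANINE tokenizes at the Unicode code-point level, which
# is why the expected ids in the batch test above are plain ord() values
# wrapped in private-use sentinels (0xE000 = 57344 for [CLS], 0xE001 = 57345
# for [SEP]). A hand-rolled equivalent (hypothetical helper name):
def canine_style_encode(text, cls_id=0xE000, sep_id=0xE001):
    return [cls_id] + [ord(ch) for ch in text] + [sep_id]

ids = canine_style_encode("Life is like a box of chocolates.")
assert ids[0] == 57344 and ids[-1] == 57345
assert ids[1:4] == [76, 105, 102]  # 'L', 'i', 'f', matching the expected ids above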
371
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def a_ ( self, lowerCAmelCase__=0) -> List[Any]: snake_case_ = floats_tensor((1, 3, 128, 128), rng=random.Random(lowerCAmelCase__)) snake_case_ = np.random.RandomState(lowerCAmelCase__) snake_case_ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def a_ ( self) -> Optional[Any]: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087]) assert np.abs(image_slice - expected_slice).max() < 1e-1 def a_ ( self) -> List[str]: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def a_ ( self) -> str: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) # warmup pass to apply optimizations snake_case_ = pipe(**self.get_dummy_inputs()) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def a_ ( self) -> int: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def 
a_ ( self) -> Dict: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def a_ ( self) -> Dict: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): @property def a_ ( self) -> int: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a_ ( self) -> str: snake_case_ = ort.SessionOptions() snake_case_ = False return options def a_ ( self) -> Any: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg') snake_case_ = init_image.resize((768, 512)) # using the PNDM scheduler by default snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = 'A fantasy landscape, trending on artstation' snake_case_ = np.random.RandomState(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images snake_case_ = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) snake_case_ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 def a_ ( self) -> List[Any]: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg') snake_case_ = init_image.resize((768, 512)) snake_case_ = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx') snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = 'A fantasy landscape, trending on artstation' snake_case_ = np.random.RandomState(0) 
snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images snake_case_ = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) snake_case_ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
312
0
import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint UpperCAmelCase__ = { '''169M''': 12, '''430M''': 24, '''1B5''': 24, '''3B''': 32, '''7B''': 32, '''14B''': 40, } UpperCAmelCase__ = { '''169M''': 768, '''430M''': 1024, '''1B5''': 2048, '''3B''': 2560, '''7B''': 4096, '''14B''': 5120, } def UpperCAmelCase_ ( __snake_case ) -> List[Any]: """simple docstring""" _lowercase =list(state_dict.keys() ) for name in state_dict_keys: _lowercase =state_dict.pop(__snake_case ) # emb -> embedding if name.startswith('''emb.''' ): _lowercase =name.replace('''emb.''' , '''embeddings.''' ) # ln_0 -> pre_ln (only present at block 0) if name.startswith('''blocks.0.ln0''' ): _lowercase =name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' ) # att -> attention _lowercase =re.sub(r'''blocks\.(\d+)\.att''' , r'''blocks.\1.attention''' , __snake_case ) # ffn -> feed_forward _lowercase =re.sub(r'''blocks\.(\d+)\.ffn''' , r'''blocks.\1.feed_forward''' , __snake_case ) # time_mix_k -> time_mix_key and reshape if name.endswith('''.time_mix_k''' ): _lowercase =name.replace('''.time_mix_k''' , '''.time_mix_key''' ) # time_mix_v -> time_mix_value and reshape if name.endswith('''.time_mix_v''' ): _lowercase =name.replace('''.time_mix_v''' , '''.time_mix_value''' ) # time_mix_r -> time_mix_key and reshape if name.endswith('''.time_mix_r''' ): _lowercase =name.replace('''.time_mix_r''' , '''.time_mix_receptance''' ) if name != "head.weight": _lowercase ='''rwkv.''' + name _lowercase =weight return state_dict def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case=None , __snake_case=None , __snake_case=False , __snake_case=None ) -> int: """simple docstring""" if tokenizer_file is None: print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' ) _lowercase =50277 _lowercase =AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' ) else: _lowercase =PreTrainedTokenizerFast(tokenizer_file=__snake_case ) _lowercase =len(__snake_case ) tokenizer.save_pretrained(__snake_case ) # 2. Build the config _lowercase =list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: _lowercase =candidate break if size is None: raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' ) if size not in possible_sizes: raise ValueError(F"`size` should be one of {possible_sizes}, got {size}." ) _lowercase =RwkvConfig( vocab_size=__snake_case , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(__snake_case ) # 3. Download model file then convert state_dict _lowercase =hf_hub_download(__snake_case , __snake_case ) _lowercase =torch.load(__snake_case , map_location='''cpu''' ) _lowercase =convert_state_dict(__snake_case ) # 4. 
Split in shards and save _lowercase , _lowercase =shard_checkpoint(__snake_case ) for shard_file, shard in shards.items(): torch.save(__snake_case , os.path.join(__snake_case , __snake_case ) ) if index is not None: _lowercase =os.path.join(__snake_case , __snake_case ) # Save the index as well with open(__snake_case , '''w''' , encoding='''utf-8''' ) as f: _lowercase =json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + '''\n''' f.write(__snake_case ) # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict) print( '''Cleaning up shards. This may error with an OOM error; if this is the case, don\'t worry, you still have converted the model.''' ) _lowercase =list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: _lowercase =torch.load(os.path.join(__snake_case , __snake_case ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__snake_case , __snake_case ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' ) _lowercase =AutoModelForCausalLM.from_pretrained(__snake_case ) model.push_to_hub(__snake_case , max_shard_size='''2GB''' ) tokenizer.push_to_hub(__snake_case ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.''' ) parser.add_argument( '''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.''' ) parser.add_argument( '''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.''' ) parser.add_argument( '''--tokenizer_file''', default=None, type=str, help='''Path to the tokenizer file to use (if not provided, only the model is converted).''', ) parser.add_argument( '''--size''', default=None, type=str, help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Push the converted model to the Hub.''', ) parser.add_argument( '''--model_name''', default=None, type=str, help='''Name of the pushed model on the Hub, including the username / organization.''', ) UpperCAmelCase__ = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
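# Illustrative sketch of the renaming rules applied by the state-dict
# converter above, using the same patterns on one concrete key.
import re

name = "blocks.3.att.time_mix_k"
name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)  # att -> attention
assert name == "blocks.3.attention.time_mix_k"
name = name.replace(".time_mix_k", ".time_mix_key")  # time_mix_k -> time_mix_key
assert name == "blocks.3.attention.time_mix_key"
# every non-head weight finally gets the "rwkv." module prefix:
assert "rwkv." + name == "rwkv.blocks.3.attention.time_mix_key"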
5
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = False ) -> bool: if n == 2: return True if not n % 2 or n < 2: return False if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit return False if n > 3317044064679887385961981 and not allow_probable: raise ValueError( 'Warning: upper bound of deterministic test is exceeded. ' 'Pass allow_probable=True to allow probabilistic test. ' 'A return value of True indicates a probable prime.' ) # array bounds provided by analysis snake_case_ = [ 2047, 1373653, 25326001, 3215031751, 2152302898747, 3474749660383, 341550071728321, 1, 3825123056546413051, 1, 1, 318665857834031151167461, 3317044064679887385961981, ] snake_case_ = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41] for idx, _p in enumerate(UpperCAmelCase , 1 ): if n < _p: # then we have our last prime to check snake_case_ = primes[:idx] break snake_case_ , snake_case_ = n - 1, 0 # break up n -1 into a power of 2 (s) and # remaining odd component # essentially, solve for d * 2 ** s == n - 1 while d % 2 == 0: d //= 2 s += 1 for prime in plist: snake_case_ = False for r in range(UpperCAmelCase ): snake_case_ = pow(UpperCAmelCase , d * 2**r , UpperCAmelCase ) # see article for analysis explanation for m if (r == 0 and m == 1) or ((m + 1) % n == 0): snake_case_ = True # this loop will not determine compositeness break if pr: continue # if pr is False, then the above loop never evaluated to true, # and the n MUST be composite return False return True def UpperCAmelCase ( ) -> None: assert not miller_rabin(561 ) assert miller_rabin(563 ) # 2047 assert not miller_rabin(838201 ) assert miller_rabin(838207 ) # 1_373_653 assert not miller_rabin(17316001 ) assert miller_rabin(17316017 ) # 25_326_001 assert not miller_rabin(3078386641 ) assert miller_rabin(3078386653 ) # 3_215_031_751 assert not miller_rabin(1713045574801 ) assert miller_rabin(1713045574819 ) # 2_152_302_898_747 assert not miller_rabin(2779799728307 ) assert miller_rabin(2779799728327 ) # 3_474_749_660_383 assert not miller_rabin(113850023909441 ) assert miller_rabin(113850023909527 ) # 341_550_071_728_321 assert not miller_rabin(1275041018848804351 ) assert miller_rabin(1275041018848804391 ) # 3_825_123_056_546_413_051 assert not miller_rabin(79666464458507787791867 ) assert miller_rabin(79666464458507787791951 ) # 318_665_857_834_031_151_167_461 assert not miller_rabin(552840677446647897660333 ) assert miller_rabin(552840677446647897660359 ) # 3_317_044_064_679_887_385_961_981 # upper limit for probabilistic test if __name__ == "__main__": test_miller_rabin()
69
0
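The Miller-Rabin snippet above first writes n - 1 as d * 2**s with d odd before testing witness primes. A self-contained sketch of just that decomposition (the helper name is mine, not the snippet's):

def decompose(n: int) -> tuple[int, int]:
    # Return (d, s) such that n - 1 == d * 2**s with d odd.
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    return d, s

assert decompose(561) == (35, 4)  # 560 == 35 * 2**4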
import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowerCAmelCase : '''simple docstring''' def __init__( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[Any]=13 , UpperCamelCase : List[Any]=32 , UpperCamelCase : List[Any]=3 , UpperCamelCase : Optional[Any]=4 , UpperCamelCase : str=[10, 20, 30, 40] , UpperCamelCase : Tuple=[2, 2, 3, 2] , UpperCamelCase : List[str]=True , UpperCamelCase : Tuple=True , UpperCamelCase : Union[str, Any]=37 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Any=10 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : int=["stage2", "stage3", "stage4"] , UpperCamelCase : Optional[Any]=[2, 3, 4] , UpperCamelCase : Optional[int]=None , ): '''simple docstring''' _snake_case : int = parent _snake_case : List[Any] = batch_size _snake_case : Tuple = image_size _snake_case : Tuple = num_channels _snake_case : Optional[int] = num_stages _snake_case : List[str] = hidden_sizes _snake_case : Dict = depths _snake_case : Dict = is_training _snake_case : List[str] = use_labels _snake_case : Tuple = intermediate_size _snake_case : Any = hidden_act _snake_case : List[Any] = num_labels _snake_case : Optional[int] = initializer_range _snake_case : Tuple = out_features _snake_case : Union[str, Any] = out_indices _snake_case : Any = scope def UpperCamelCase_ ( self : Dict ): '''simple docstring''' _snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Optional[Any] = None if self.use_labels: _snake_case : str = ids_tensor([self.batch_size] , self.num_labels ) _snake_case : Optional[int] = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple ): '''simple docstring''' _snake_case : Dict = ConvNextModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() _snake_case : Tuple = model(UpperCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : str , UpperCamelCase : int ): '''simple docstring''' _snake_case : List[str] = 
ConvNextForImageClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() _snake_case : Optional[Any] = model(UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : Any , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : List[str] ): '''simple docstring''' _snake_case : List[Any] = ConvNextBackbone(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() _snake_case : Optional[Any] = model(UpperCamelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None _snake_case : Optional[Any] = None _snake_case : Dict = ConvNextBackbone(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() _snake_case : Union[str, Any] = model(UpperCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def UpperCamelCase_ ( self : int ): '''simple docstring''' _snake_case : Dict = self.prepare_config_and_inputs() _snake_case : Any = config_and_inputs _snake_case : Any = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' a_ : List[str] =( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) a_ : Optional[int] =( {"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification} if is_torch_available() else {} ) a_ : Optional[int] =True a_ : Any =False a_ : Dict =False a_ : Dict =False a_ : List[str] =False def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' _snake_case : str = ConvNextModelTester(self ) _snake_case : Optional[int] = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 ) def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' return @unittest.skip(reason='ConvNext does not use inputs_embeds' ) def UpperCamelCase_ ( self : str ): '''simple docstring''' pass @unittest.skip(reason='ConvNext does not support input and output embeddings' ) def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' pass @unittest.skip(reason='ConvNext does not use feedforward chunking' ) def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' pass def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' _snake_case : List[Any] = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Dict = model_class(UpperCamelCase ) _snake_case : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : Dict = [*signature.parameters.keys()] _snake_case : Tuple = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCamelCase ) def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' _snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' _snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*UpperCamelCase ) def UpperCamelCase_ ( self : str ): '''simple docstring''' def check_hidden_states_output(UpperCamelCase : List[Any] , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] ): _snake_case : List[str] = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): _snake_case : List[Any] = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) _snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Any = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : int = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' _snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase ) @slow def UpperCamelCase_ ( self : Any ): '''simple docstring''' for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : Dict = ConvNextModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) def lowerCamelCase_ ( )-> Union[str, Any]: _snake_case : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None @slow def UpperCamelCase_ ( self : str ): '''simple docstring''' _snake_case : Any = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(UpperCamelCase ) _snake_case : Dict = self.default_image_processor _snake_case : int = prepare_img() _snake_case : List[str] = image_processor(images=UpperCamelCase , return_tensors='pt' ).to(UpperCamelCase ) # forward pass with torch.no_grad(): _snake_case : str = model(**UpperCamelCase ) # verify the logits _snake_case : List[str] = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , 
UpperCamelCase ) _snake_case : Any = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) ) @require_torch class _lowerCAmelCase ( unittest.TestCase , UpperCAmelCase_ ): '''simple docstring''' a_ : List[Any] =(ConvNextBackbone,) if is_torch_available() else () a_ : Optional[Any] =ConvNextConfig a_ : Optional[Any] =False def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' _snake_case : Tuple = ConvNextModelTester(self )
366
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """nielsr/canine-s""": 2048, } # Unicode defines 1,114,112 total “codepoints” lowerCAmelCase_ = 111_4112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py lowerCAmelCase_ = 0 lowerCAmelCase_ = 0xE_000 lowerCAmelCase_ = 0xE_001 lowerCAmelCase_ = 0xE_002 lowerCAmelCase_ = 0xE_003 lowerCAmelCase_ = 0xE_004 # Maps special codepoints to human-readable names. lowerCAmelCase_ = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. lowerCAmelCase_ = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class _lowerCAmelCase ( UpperCAmelCase_ ): '''simple docstring''' a_ : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Dict , UpperCamelCase : int=chr(UpperCamelCase ) , UpperCamelCase : Union[str, Any]=chr(UpperCamelCase ) , UpperCamelCase : Any=chr(UpperCamelCase ) , UpperCamelCase : Union[str, Any]=chr(UpperCamelCase ) , UpperCamelCase : List[Any]=chr(UpperCamelCase ) , UpperCamelCase : List[str]=chr(UpperCamelCase ) , UpperCamelCase : int=False , UpperCamelCase : str=20_48 , **UpperCamelCase : List[str] , ): '''simple docstring''' _snake_case : Tuple = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else bos_token _snake_case : Optional[Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else eos_token _snake_case : Any = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else sep_token _snake_case : str = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else cls_token _snake_case : Dict = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else pad_token # Mask token behave like a normal word, i.e. include the space before it _snake_case : str = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token super().__init__( bos_token=UpperCamelCase , eos_token=UpperCamelCase , sep_token=UpperCamelCase , cls_token=UpperCamelCase , pad_token=UpperCamelCase , mask_token=UpperCamelCase , add_prefix_space=UpperCamelCase , model_max_length=UpperCamelCase , **UpperCamelCase , ) # Creates a mapping for looking up the IDs of special symbols. _snake_case : Dict[str, int] = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): _snake_case : Tuple = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. 
_snake_case : Dict[int, str] = { codepoint: name for name, codepoint in self._special_codepoints.items() } _snake_case : str = UNICODE_VOCAB_SIZE _snake_case : Optional[Any] = len(self._special_codepoints ) @property def UpperCamelCase_ ( self : int ): '''simple docstring''' return self._unicode_vocab_size def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : str ): '''simple docstring''' return list(UpperCamelCase ) def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : str ): '''simple docstring''' try: return ord(UpperCamelCase ) except TypeError: raise ValueError(f"""invalid token: '{token}'""" ) def UpperCamelCase_ ( self : Dict , UpperCamelCase : int ): '''simple docstring''' try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(UpperCamelCase ) except TypeError: raise ValueError(f"""invalid id: {index}""" ) def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : List[Any] ): '''simple docstring''' return "".join(UpperCamelCase ) def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' _snake_case : Optional[Any] = [self.sep_token_id] _snake_case : int = [self.cls_token_id] _snake_case : Any = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def UpperCamelCase_ ( self : Tuple , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase ) _snake_case : int = [1] + ([0] * len(UpperCamelCase )) + [1] if token_ids_a is not None: result += ([0] * len(UpperCamelCase )) + [1] return result def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' _snake_case : List[Any] = [self.sep_token_id] _snake_case : Dict = [self.cls_token_id] _snake_case : Tuple = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def UpperCamelCase_ ( self : int , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ): '''simple docstring''' return ()
260
0
'''simple docstring''' class snake_case__ : def __init__( self : List[Any] , _A : Optional[int] ) -> Optional[int]: # we need a list, not a string, so split the comma-separated input into a list UpperCAmelCase_ : Any = arr.split(''',''' ) def A ( self : Tuple ) -> List[str]: UpperCAmelCase_ : int = [int(self.array[0] )] * len(self.array ) UpperCAmelCase_ : Optional[int] = [int(self.array[0] )] * len(self.array ) for i in range(1 , len(self.array ) ): UpperCAmelCase_ : List[Any] = max( int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) ) UpperCAmelCase_ : Tuple = max(sum_value[i] , rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": _UpperCamelCase : Optional[int] = input('please input some numbers:') _UpperCamelCase : Union[str, Any] = SubArray(whole_array) _UpperCamelCase : Optional[Any] = array.solve_sub_array() print(('the result is:', re))
304
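The SubArray class above computes the maximum-sum contiguous subarray with two DP arrays. The same recurrence collapses into the usual one-pass Kadane formulation; a sketch under my own naming, not part of the row:

def max_subarray_sum(arr: list[int]) -> int:
    best = current = arr[0]
    for x in arr[1:]:
        current = max(x, current + x)  # extend the current run or restart at x
        best = max(best, current)
    return best

assert max_subarray_sum([1, -2, 4, -1, 2]) == 5  # subarray [4, -1, 2]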
'''simple docstring''' from __future__ import annotations import math def __UpperCAmelCase ( A : int , A : int , A : bool , A : list[int] , A : float ) -> int: if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if not scores: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] return ( max( minimax(depth + 1 , node_index * 2 , A , A , A ) , minimax(depth + 1 , node_index * 2 + 1 , A , A , A ) , ) if is_max else min( minimax(depth + 1 , node_index * 2 , A , A , A ) , minimax(depth + 1 , node_index * 2 + 1 , A , A , A ) , ) ) def __UpperCAmelCase ( ) -> None: UpperCAmelCase_ : List[str] = [9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3] UpperCAmelCase_ : List[Any] = math.log(len(A ) , 2 ) print(F"Optimal value : {minimax(0 , 0 , A , A , A )}" ) if __name__ == "__main__": import doctest doctest.testmod() main()
304
1
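To make the minimax row above concrete, here is a readable restatement with a worked call on a four-leaf tree (names and signature are my reconstruction of the obfuscated code):

import math

def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth == height:
        return scores[node_index]
    children = (
        minimax(depth + 1, node_index * 2, not is_max, scores, height),
        minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height),
    )
    return max(children) if is_max else min(children)

scores = [3, 5, 2, 9]
height = math.log(len(scores), 2)  # 2.0
assert minimax(0, 0, True, scores, height) == 3  # max(min(3, 5), min(2, 9))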
from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ : Union[str, Any] = logging.get_logger(__name__) snake_case__ : Optional[int] = { 'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json', # See all Donut models at https://huggingface.co/models?filter=donut-swin } class A_ ( _lowerCamelCase ): lowerCAmelCase__ = """donut-swin""" lowerCAmelCase__ = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__(self :Optional[int] , _UpperCamelCase :int=224 , _UpperCamelCase :int=4 , _UpperCamelCase :Any=3 , _UpperCamelCase :List[Any]=96 , _UpperCamelCase :Any=[2, 2, 6, 2] , _UpperCamelCase :Optional[int]=[3, 6, 12, 24] , _UpperCamelCase :int=7 , _UpperCamelCase :Optional[int]=4.0 , _UpperCamelCase :str=True , _UpperCamelCase :str=0.0 , _UpperCamelCase :str=0.0 , _UpperCamelCase :Optional[Any]=0.1 , _UpperCamelCase :Optional[Any]="gelu" , _UpperCamelCase :Dict=False , _UpperCamelCase :Optional[int]=0.0_2 , _UpperCamelCase :Tuple=1e-5 , **_UpperCamelCase :Any , )-> List[Any]: super().__init__(**_UpperCamelCase ) __A = image_size __A = patch_size __A = num_channels __A = embed_dim __A = depths __A = len(_UpperCamelCase ) __A = num_heads __A = window_size __A = mlp_ratio __A = qkv_bias __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = drop_path_rate __A = hidden_act __A = use_absolute_embeddings __A = layer_norm_eps __A = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __A = int(embed_dim * 2 ** (len(_UpperCamelCase ) - 1) )
250
import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification snake_case__ : Dict = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co snake_case__ : Any = 'main' # Default branch name snake_case__ : Union[str, Any] = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2' # One particular commit (not the top of `main`) snake_case__ : Optional[int] = 'aaaaaaa' # This commit does not exist, so we should 404. snake_case__ : int = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684' # Sha-1 of config.json on the top of `main`, for checking purposes snake_case__ : Any = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3' @contextlib.contextmanager def _a ( ) -> Tuple: '''simple docstring''' print('''Welcome!''' ) yield print('''Bye!''' ) @contextlib.contextmanager def _a ( ) -> Optional[int]: '''simple docstring''' print('''Bonjour!''' ) yield print('''Au revoir!''' ) class A_ ( unittest.TestCase ): def _lowerCAmelCase (self :Any )-> Optional[Any]: # If the spec is missing, importlib would not be able to import the module dynamically. 
assert transformers.__spec__ is not None assert importlib.util.find_spec('''transformers''' ) is not None class A_ ( unittest.TestCase ): @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO ) def _lowerCAmelCase (self :str , _UpperCamelCase :str )-> Optional[int]: with ContextManagers([] ): print('''Transformers are awesome!''' ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , '''Transformers are awesome!\n''' ) @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO ) def _lowerCAmelCase (self :Optional[int] , _UpperCamelCase :List[Any] )-> Union[str, Any]: with ContextManagers([context_en()] ): print('''Transformers are awesome!''' ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , '''Welcome!\nTransformers are awesome!\nBye!\n''' ) @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO ) def _lowerCAmelCase (self :int , _UpperCamelCase :Union[str, Any] )-> int: with ContextManagers([context_fr(), context_en()] ): print('''Transformers are awesome!''' ) # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''' ) @require_torch def _lowerCAmelCase (self :int )-> str: self.assertEqual(find_labels(_UpperCamelCase ) , ['''labels'''] ) self.assertEqual(find_labels(_UpperCamelCase ) , ['''labels''', '''next_sentence_label'''] ) self.assertEqual(find_labels(_UpperCamelCase ) , ['''start_positions''', '''end_positions'''] ) class A_ ( _lowerCamelCase ): pass self.assertEqual(find_labels(_UpperCamelCase ) , ['''labels'''] ) @require_tf def _lowerCAmelCase (self :Any )-> str: self.assertEqual(find_labels(_UpperCamelCase ) , ['''labels'''] ) self.assertEqual(find_labels(_UpperCamelCase ) , ['''labels''', '''next_sentence_label'''] ) self.assertEqual(find_labels(_UpperCamelCase ) , ['''start_positions''', '''end_positions'''] ) class A_ ( _lowerCamelCase ): pass self.assertEqual(find_labels(_UpperCamelCase ) , ['''labels'''] ) @require_flax def _lowerCAmelCase (self :Optional[int] )-> Dict: # Flax models don't have labels self.assertEqual(find_labels(_UpperCamelCase ) , [] ) self.assertEqual(find_labels(_UpperCamelCase ) , [] ) self.assertEqual(find_labels(_UpperCamelCase ) , [] ) class A_ ( _lowerCamelCase ): pass self.assertEqual(find_labels(_UpperCamelCase ) , [] )
250
1
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self) -> int: _A : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") _A : Optional[int] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(__lowerCamelCase) _A : str = -1 _A : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(__lowerCamelCase) _A : str = model.generate(__lowerCamelCase , max_new_tokens=1_0 , do_sample=__lowerCamelCase) _A : List[Any] = tokenizer.decode(greedy_ids[0]) with CaptureStdout() as cs: _A : int = TextStreamer(__lowerCamelCase) model.generate(__lowerCamelCase , max_new_tokens=1_0 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _A : Optional[int] = cs.out[:-1] self.assertEqual(__lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self) -> Union[str, Any]: _A : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") _A : Any = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(__lowerCamelCase) _A : Optional[int] = -1 _A : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(__lowerCamelCase) _A : str = model.generate(__lowerCamelCase , max_new_tokens=1_0 , do_sample=__lowerCamelCase) _A : Tuple = tokenizer.decode(greedy_ids[0]) _A : Any = TextIteratorStreamer(__lowerCamelCase) _A : Optional[Any] = {"input_ids": input_ids, "max_new_tokens": 1_0, "do_sample": False, "streamer": streamer} _A : Optional[Any] = Thread(target=model.generate , kwargs=__lowerCamelCase) thread.start() _A : Union[str, Any] = "" for new_text in streamer: streamer_text += new_text self.assertEqual(__lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self) -> List[str]: _A : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") _A : Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(__lowerCamelCase) _A : Union[str, Any] = -1 _A : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(__lowerCamelCase) _A : Optional[int] = model.generate(__lowerCamelCase , max_new_tokens=1_0 , do_sample=__lowerCamelCase) _A : List[str] = greedy_ids[:, input_ids.shape[1] :] _A : Dict = tokenizer.decode(new_greedy_ids[0]) with CaptureStdout() as cs: _A : List[str] = TextStreamer(__lowerCamelCase , skip_prompt=__lowerCamelCase) model.generate(__lowerCamelCase , max_new_tokens=1_0 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _A : Any = cs.out[:-1] self.assertEqual(__lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self) -> Any: # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
# Must be tested with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them _A : List[str] = AutoTokenizer.from_pretrained("distilgpt2") _A : Any = AutoModelForCausalLM.from_pretrained("distilgpt2").to(__lowerCamelCase) _A : Any = -1 _A : str = torch.ones((1, 5) , device=__lowerCamelCase).long() * model.config.bos_token_id with CaptureStdout() as cs: _A : List[Any] = TextStreamer(__lowerCamelCase , skip_special_tokens=__lowerCamelCase) model.generate(__lowerCamelCase , max_new_tokens=1 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _A : Union[str, Any] = cs.out[:-1] # Remove the final "\n" _A : Optional[Any] = tokenizer(__lowerCamelCase , return_tensors="pt") self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1)) def _lowerCamelCase ( self) -> int: _A : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") _A : Optional[int] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(__lowerCamelCase) _A : List[str] = -1 _A : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(__lowerCamelCase) _A : Any = TextIteratorStreamer(__lowerCamelCase , timeout=0.0_0_1) _A : List[Any] = {"input_ids": input_ids, "max_new_tokens": 1_0, "do_sample": False, "streamer": streamer} _A : str = Thread(target=model.generate , kwargs=__lowerCamelCase) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__lowerCamelCase): _A : Optional[Any] = "" for new_text in streamer: streamer_text += new_text
11
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase__ = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
11
1
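The SpeechT5 __init__ above (and the BERT one further down) defers all heavy imports to transformers' _LazyModule. A simplified sketch of that lazy-import pattern, reconstructed from the usage rather than copied from the library:

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_submodule = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr: str):
        submodule = self._symbol_to_submodule.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + submodule, self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value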
'''simple docstring''' from sklearn.metrics import fa_score import datasets __lowerCAmelCase = ''' The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) ''' __lowerCAmelCase = ''' Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`. - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives. - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {\'f1\': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results[\'f1\'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results[\'f1\'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. 
>>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro") >>> print(round(results[\'f1\'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {\'f1\': array([0.8, 0. , 0. ])} ''' __lowerCAmelCase = ''' @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): def __lowercase ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('int32' ) ), 'references': datasets.Sequence(datasets.Value('int32' ) ), } if self.config_name == 'multilabel' else { 'predictions': datasets.Value('int32' ), 'references': datasets.Value('int32' ), } ) ,reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] ,) def __lowercase ( self : Any ,_UpperCAmelCase : str ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : int=None ,_UpperCAmelCase : List[Any]=1 ,_UpperCAmelCase : int="binary" ,_UpperCAmelCase : List[Any]=None ): _a : Any = fa_score( snake_case__ ,snake_case__ ,labels=snake_case__ ,pos_label=snake_case__ ,average=snake_case__ ,sample_weight=snake_case__ ) return {"f1": float(snake_case__ ) if score.size == 1 else score}
362
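A quick sanity check of the binary example in the F1 docstring above, computed directly with scikit-learn rather than through the metric wrapper:

from sklearn.metrics import f1_score

refs = [0, 1, 0, 1, 0]
preds = [0, 0, 1, 1, 0]
# TP = 1, FP = 1, FN = 1, so precision = recall = 0.5 and F1 = 0.5
print(f1_score(refs, preds))  # 0.5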
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase = logging.get_logger(__name__) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Any: _a : Dict = 'backbone.' if is_semantic else '' _a : Optional[int] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ (f"""{prefix}cls_token""", 'beit.embeddings.cls_token'), (f"""{prefix}patch_embed.proj.weight""", 'beit.embeddings.patch_embeddings.projection.weight'), (f"""{prefix}patch_embed.proj.bias""", 'beit.embeddings.patch_embeddings.projection.bias'), (f"""{prefix}pos_embed""", 'beit.embeddings.position_embeddings'), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('mask_token', 'beit.embeddings.mask_token'), ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) else: # layernorm + classification head rename_keys.extend( [ ('fc_norm.weight', 'beit.pooler.layernorm.weight'), ('fc_norm.bias', 'beit.pooler.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> int: for i in range(config.num_hidden_layers ): _a : Union[str, Any] = 'backbone.' 
if is_semantic else '' # queries, keys and values _a : Any = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" ) _a : str = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" ) _a : str = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" ) _a : int = in_proj_weight[ : config.hidden_size, : ] _a : str = q_bias _a : List[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _a : List[str] = in_proj_weight[ -config.hidden_size :, : ] _a : Any = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained _a : int = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" ) _a : Tuple = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" ) _a : Tuple = gamma_a _a : Optional[Any] = gamma_a def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict: _a : List[Any] = dct.pop(lowerCAmelCase_ ) _a : List[Any] = val def __lowerCamelCase ( ) -> Dict: _a : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg' _a : Dict = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw ) return im @torch.no_grad() def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> Union[str, Any]: _a : Optional[Any] = False if 'rvlcdip' in checkpoint_url else True _a : List[Any] = BeitConfig(use_absolute_position_embeddings=lowerCAmelCase_ , use_mask_token=lowerCAmelCase_ ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: _a : Optional[Any] = 1024 _a : Tuple = 4096 _a : Any = 24 _a : Optional[int] = 16 # labels if "rvlcdip" in checkpoint_url: _a : Any = 16 _a : Tuple = 'huggingface/label-files' _a : List[Any] = 'rvlcdip-id2label.json' _a : Optional[int] = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type='dataset' ) , 'r' ) ) _a : int = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()} _a : int = idalabel _a : int = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys _a : List[Any] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location='cpu' )['model'] _a : Dict = create_rename_keys(lowerCAmelCase_ , has_lm_head=lowerCAmelCase_ ) for src, dest in rename_keys: rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ , has_lm_head=lowerCAmelCase_ ) # load HuggingFace model _a : Dict = BeitForMaskedImageModeling(lowerCAmelCase_ ) if has_lm_head else BeitForImageClassification(lowerCAmelCase_ ) model.eval() model.load_state_dict(lowerCAmelCase_ ) # Check outputs on an image _a : Dict = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCAmelCase_ ) _a : Dict = prepare_img() _a : str = image_processor(images=lowerCAmelCase_ , return_tensors='pt' ) _a : Optional[Any] = encoding['pixel_values'] _a : Optional[Any] = model(lowerCAmelCase_ ) _a : Optional[Any] = outputs.logits # verify logits _a : int = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8192] assert logits.shape == torch.Size(lowerCAmelCase_ ), "Shape of logits not as expected" Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowerCAmelCase_ ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(lowerCAmelCase_ ) if push_to_hub: if has_lm_head: _a : Tuple = 'dit-base' if 'base' in checkpoint_url else 'dit-large' else: _a : int = 
'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip' image_processor.push_to_hub( repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCAmelCase_ , ) model.push_to_hub( repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCAmelCase_ , ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_url''', default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''', type=str, help='''URL to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', ) __lowerCAmelCase = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
107
0
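The read_in_q_k_v helper in the DiT/BEiT conversion above slices a fused qkv projection into separate query, key, and value weights. A stand-alone sketch of that slicing; the hidden size of 768 is illustrative:

import torch

hidden = 768
qkv_weight = torch.randn(3 * hidden, hidden)  # fused projection, rows stacked as [q; k; v]
q = qkv_weight[:hidden, :]
k = qkv_weight[hidden : 2 * hidden, :]
v = qkv_weight[-hidden:, :]
assert q.shape == k.shape == v.shape == (hidden, hidden)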
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging lowerCAmelCase_ : int = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE (snake_case_ ): """simple docstring""" __a =['input_features'] def __init__( self : List[Any] , __a : Optional[Any]=80 , __a : List[Any]=1_60_00 , __a : List[str]=1_60 , __a : str=30 , __a : str=4_00 , __a : Optional[int]=0.0 , __a : Union[str, Any]=False , **__a : List[Any] , ): super().__init__( feature_size=__a , sampling_rate=__a , padding_value=__a , return_attention_mask=__a , **__a , ) _a = n_fft _a = hop_length _a = chunk_length _a = chunk_length * sampling_rate _a = self.n_samples // hop_length _a = sampling_rate _a = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__a , min_frequency=0.0 , max_frequency=80_00.0 , sampling_rate=__a , norm="slaney" , mel_scale="slaney" , ) def UpperCamelCase__ ( self : str , __a : np.array ): _a = spectrogram( __a , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , ) _a = log_spec[:, :-1] _a = np.maximum(__a , log_spec.max() - 8.0 ) _a = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def UpperCamelCase__ ( __a : List[np.ndarray] , __a : List[np.ndarray] , __a : float = 0.0 ): if attention_mask is not None: _a = np.array(__a , np.intaa ) _a = [] for vector, length in zip(__a , attention_mask.sum(-1 ) ): _a = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 ) if length < normed_slice.shape[0]: _a = padding_value normed_input_values.append(__a ) else: _a = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values] return normed_input_values def __call__( self : Optional[Any] , __a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __a : bool = True , __a : Optional[int] = None , __a : Optional[Union[str, TensorType]] = None , __a : Optional[bool] = None , __a : Optional[str] = "max_length" , __a : Optional[int] = None , __a : Optional[int] = None , __a : Optional[bool] = None , **__a : Dict , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a' f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input' f' was sampled with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) _a = isinstance(__a , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) _a = is_batched_numpy or ( isinstance(__a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__a , np.ndarray ): _a = np.asarray(__a , dtype=np.floataa ) elif isinstance(__a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _a = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _a = [np.asarray([raw_speech] ).T] _a = BatchFeature({"input_features": raw_speech} ) # convert into correct format for padding _a = self.pad( __a , padding=__a , max_length=max_length if max_length else self.n_samples , truncation=__a , pad_to_multiple_of=__a , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: _a = self.zero_mean_unit_var_norm( padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , ) _a = np.stack(padded_inputs["input_features"] , axis=0 ) # make sure list is in array format _a = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 ) _a = [self._np_extract_fbank_features(__a ) for waveform in input_features[0]] if isinstance(input_features[0] , __a ): _a = [np.asarray(__a , dtype=np.floataa ) for feature in input_features] else: _a = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) _a = padded_inputs["attention_mask"][:, :: self.hop_length] if return_tensors is not None: _a = padded_inputs.convert_to_tensors(__a ) return padded_inputs def UpperCamelCase__ ( self : Optional[Any] ): _a = copy.deepcopy(self.__dict__ ) _a = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
63
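The feature extractor above optionally applies per-utterance zero-mean, unit-variance normalization. Isolated on hypothetical data, the transform is simply:

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
normed = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
print(normed.mean(), normed.std())  # approximately 0.0 and 1.0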
def __snake_case ( __UpperCamelCase : int = 1000 ): """simple docstring""" return sum(2 * a * ((a - 1) // 2) for a in range(3 ,__UpperCamelCase + 1 ) ) if __name__ == "__main__": print(__snake_case())
312
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE__:str = { """configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""], """tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:str = ["""BertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Optional[int] = [ """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BertForMaskedLM""", """BertForMultipleChoice""", """BertForNextSentencePrediction""", """BertForPreTraining""", """BertForQuestionAnswering""", """BertForSequenceClassification""", """BertForTokenClassification""", """BertLayer""", """BertLMHeadModel""", """BertModel""", """BertPreTrainedModel""", """load_tf_weights_in_bert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:int = [ """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBertEmbeddings""", """TFBertForMaskedLM""", """TFBertForMultipleChoice""", """TFBertForNextSentencePrediction""", """TFBertForPreTraining""", """TFBertForQuestionAnswering""", """TFBertForSequenceClassification""", """TFBertForTokenClassification""", """TFBertLMHeadModel""", """TFBertMainLayer""", """TFBertModel""", """TFBertPreTrainedModel""", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Optional[int] = ["""TFBertTokenizer"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:List[Any] = [ """FlaxBertForCausalLM""", """FlaxBertForMaskedLM""", """FlaxBertForMultipleChoice""", """FlaxBertForNextSentencePrediction""", """FlaxBertForPreTraining""", """FlaxBertForQuestionAnswering""", """FlaxBertForSequenceClassification""", """FlaxBertForTokenClassification""", """FlaxBertModel""", """FlaxBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, 
TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__:Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
365
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__:Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__:str = { """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class snake_case__ ( snake_case_ ): _snake_case : str = """sew-d""" def __init__( self , lowerCamelCase=32 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase=2 , lowerCamelCase=512 , lowerCamelCase=256 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=("p2c", "c2p") , lowerCamelCase="layer_norm" , lowerCamelCase="gelu_python" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.1 , lowerCamelCase=0.02 , lowerCamelCase=1E-7 , lowerCamelCase=1E-5 , lowerCamelCase="group" , lowerCamelCase="gelu" , lowerCamelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowerCamelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase=False , lowerCamelCase=128 , lowerCamelCase=16 , lowerCamelCase=True , lowerCamelCase=0.05 , lowerCamelCase=10 , lowerCamelCase=2 , lowerCamelCase=0.0 , lowerCamelCase=10 , lowerCamelCase=0 , lowerCamelCase="mean" , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=256 , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , **lowerCamelCase , ): super().__init__(**lowerCamelCase , pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase ) __a = hidden_size __a = feat_extract_norm __a = feat_extract_activation __a = list(lowerCamelCase ) __a = list(lowerCamelCase ) __a = list(lowerCamelCase ) __a = conv_bias __a = num_conv_pos_embeddings __a = num_conv_pos_embedding_groups __a = len(self.conv_dim ) __a = num_hidden_layers __a = intermediate_size __a = squeeze_factor __a = max_position_embeddings __a = position_buckets __a = share_att_key __a = relative_attention __a = norm_rel_ebd __a = list(lowerCamelCase ) __a = hidden_act __a = num_attention_heads __a = hidden_dropout __a = attention_dropout __a = activation_dropout __a = feat_proj_dropout __a = final_dropout __a = layer_norm_eps __a = feature_layer_norm_eps __a = initializer_range __a = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)" F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __a = apply_spec_augment __a = mask_time_prob __a = mask_time_length __a = mask_time_min_masks __a = mask_feature_prob __a = mask_feature_length __a = mask_feature_min_masks # ctc loss __a = ctc_loss_reduction __a = ctc_zero_infinity # sequence classification __a = use_weighted_layer_sum __a = classifier_proj_size @property def a__ ( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
268
0
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memorry_efficient_attention=True,  # (sic) the kwarg keeps this upstream spelling
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memorry_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memorry_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
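An illustrative use of the validator above, added here and not part of the original file; these are the only shapes `_rope_scaling_validation` accepts.

# Passes: exactly two keys, `type` in {"linear", "dynamic"}, and a float `factor` > 1.0.
config = OpenLlamaConfig(rope_scaling={"type": "dynamic", "factor": 2.0})

# Each of these raises ValueError:
# OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 1.0})  # factor must exceed 1.0
# OpenLlamaConfig(rope_scaling={"type": "exotic", "factor": 2.0})  # unknown scaling type
# OpenLlamaConfig(rope_scaling={"factor": 2.0})                    # must have exactly two fields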
258
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] _UpperCAmelCase = True for i in range(_SCREAMING_SNAKE_CASE ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: _UpperCAmelCase = True if a[i].islower(): _UpperCAmelCase = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
260
0
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCamelCase__ : List[Any] = logging.get_logger(__name__) UpperCamelCase__ : Union[str, Any] = { 'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json', # See all Dinat models at https://huggingface.co/models?filter=dinat } class lowerCamelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = 'dinat' SCREAMING_SNAKE_CASE_ = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : str ,__lowerCamelCase : str=4 ,__lowerCamelCase : Any=3 ,__lowerCamelCase : str=64 ,__lowerCamelCase : int=[3, 4, 6, 5] ,__lowerCamelCase : Union[str, Any]=[2, 4, 8, 16] ,__lowerCamelCase : Optional[int]=7 ,__lowerCamelCase : int=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] ,__lowerCamelCase : Any=3.0 ,__lowerCamelCase : int=True ,__lowerCamelCase : List[str]=0.0 ,__lowerCamelCase : Optional[int]=0.0 ,__lowerCamelCase : int=0.1 ,__lowerCamelCase : str="gelu" ,__lowerCamelCase : Optional[Any]=0.02 ,__lowerCamelCase : Dict=1e-5 ,__lowerCamelCase : Union[str, Any]=0.0 ,__lowerCamelCase : Dict=None ,__lowerCamelCase : Optional[Any]=None ,**__lowerCamelCase : int ,): '''simple docstring''' super().__init__(**_SCREAMING_SNAKE_CASE ) a = patch_size a = num_channels a = embed_dim a = depths a = len(_SCREAMING_SNAKE_CASE ) a = num_heads a = kernel_size a = dilations a = mlp_ratio a = qkv_bias a = hidden_dropout_prob a = attention_probs_dropout_prob a = drop_path_rate a = hidden_act a = layer_norm_eps a = initializer_range # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model a = int(embed_dim * 2 ** (len(_SCREAMING_SNAKE_CASE ) - 1) ) a = layer_scale_init_value a = ["stem"] + [F"""stage{idx}""" for idx in range(1 ,len(_SCREAMING_SNAKE_CASE ) + 1 )] a = get_aligned_output_features_output_indices( out_features=_SCREAMING_SNAKE_CASE ,out_indices=_SCREAMING_SNAKE_CASE ,stage_names=self.stage_names )
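A quick arithmetic check, added as an illustration (not part of the original file): with the defaults above (embed_dim=64 and four stages from depths=[3, 4, 6, 5]), the channel count doubles after each stage, so the `hidden_size` attribute set at the end of `__init__` is int(embed_dim * 2 ** (num_stages - 1)) = 512.

embed_dim = 64
num_stages = 4  # len(depths) with the default depths
assert int(embed_dim * 2 ** (num_stages - 1)) == 512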
364
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig UpperCamelCase__ : List[Any] = logging.get_logger(__name__) # General docstring UpperCamelCase__ : List[Any] = """RegNetConfig""" # Base docstring UpperCamelCase__ : Dict = """facebook/regnet-y-040""" UpperCamelCase__ : int = [1, 1_088, 7, 7] # Image classification docstring UpperCamelCase__ : Optional[Any] = """facebook/regnet-y-040""" UpperCamelCase__ : Dict = """tabby, tabby cat""" UpperCamelCase__ : Dict = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[str] ,__lowerCamelCase : int ,__lowerCamelCase : int = 3 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : Optional[str] = "relu" ,**__lowerCamelCase : str ,): '''simple docstring''' super().__init__(**__lowerCamelCase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb a = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) a = tf.keras.layers.ConvaD( filters=__lowerCamelCase ,kernel_size=__lowerCamelCase ,strides=__lowerCamelCase ,padding='''VALID''' ,groups=__lowerCamelCase ,use_bias=__lowerCamelCase ,name='''convolution''' ,) a = tf.keras.layers.BatchNormalization(epsilon=1e-5 ,momentum=0.9 ,name='''normalization''' ) a = ACTaFN[activation] if activation is not None else tf.identity def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : List[str] ): '''simple docstring''' a = self.convolution(self.padding(__lowerCamelCase ) ) a = self.normalization(__lowerCamelCase ) a = self.activation(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Any ,__lowerCamelCase : RegNetConfig ,**__lowerCamelCase : List[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = config.num_channels a = TFRegNetConvLayer( out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name='''embedder''' ,) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : Optional[Any] ): '''simple docstring''' a = shape_list(__lowerCamelCase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) a = tf.transpose(__lowerCamelCase ,perm=(0, 2, 3, 1) ) a = self.embedder(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : str ,__lowerCamelCase : int ,__lowerCamelCase : int = 2 ,**__lowerCamelCase : Tuple ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = tf.keras.layers.ConvaD( filters=__lowerCamelCase ,kernel_size=1 ,strides=__lowerCamelCase ,use_bias=__lowerCamelCase ,name='''convolution''' ) a = tf.keras.layers.BatchNormalization(epsilon=1e-5 ,momentum=0.9 ,name='''normalization''' ) def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : bool = False ): '''simple docstring''' return self.normalization(self.convolution(__lowerCamelCase ) ,training=__lowerCamelCase ) class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] ,__lowerCamelCase : int ,__lowerCamelCase : int ,**__lowerCamelCase : str ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowerCamelCase ,name='''pooler''' ) a = [ tf.keras.layers.ConvaD(filters=__lowerCamelCase ,kernel_size=1 ,activation='''relu''' ,name='''attention.0''' ), tf.keras.layers.ConvaD(filters=__lowerCamelCase ,kernel_size=1 ,activation='''sigmoid''' ,name='''attention.2''' ), ] def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : Optional[Any] ): '''simple docstring''' a = self.pooler(__lowerCamelCase ) for layer_module in self.attention: a = layer_module(__lowerCamelCase ) a = hidden_state * pooled return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] ,__lowerCamelCase : RegNetConfig ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : int = 1 ,**__lowerCamelCase : Dict ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = in_channels != out_channels or stride != 1 a = max(1 ,out_channels // config.groups_width ) a = ( TFRegNetShortCut(__lowerCamelCase ,stride=__lowerCamelCase ,name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' ,name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
a = [ TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=config.hidden_act ,name='''layer.0''' ), TFRegNetConvLayer( __lowerCamelCase ,stride=__lowerCamelCase ,groups=__lowerCamelCase ,activation=config.hidden_act ,name='''layer.1''' ), TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=__lowerCamelCase ,name='''layer.2''' ), ] a = ACTaFN[config.hidden_act] def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' a = hidden_state for layer_module in self.layers: a = layer_module(__lowerCamelCase ) a = self.shortcut(__lowerCamelCase ) hidden_state += residual a = self.activation(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Dict ,__lowerCamelCase : RegNetConfig ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : int = 1 ,**__lowerCamelCase : List[str] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = in_channels != out_channels or stride != 1 a = max(1 ,out_channels // config.groups_width ) a = ( TFRegNetShortCut(__lowerCamelCase ,stride=__lowerCamelCase ,name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' ,name='''shortcut''' ) ) a = [ TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=config.hidden_act ,name='''layer.0''' ), TFRegNetConvLayer( __lowerCamelCase ,stride=__lowerCamelCase ,groups=__lowerCamelCase ,activation=config.hidden_act ,name='''layer.1''' ), TFRegNetSELayer(__lowerCamelCase ,reduced_channels=int(round(in_channels / 4 ) ) ,name='''layer.2''' ), TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=__lowerCamelCase ,name='''layer.3''' ), ] a = ACTaFN[config.hidden_act] def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ,__lowerCamelCase : str ): '''simple docstring''' a = hidden_state for layer_module in self.layers: a = layer_module(__lowerCamelCase ) a = self.shortcut(__lowerCamelCase ) hidden_state += residual a = self.activation(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Optional[int] ,__lowerCamelCase : RegNetConfig ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : int = 2 ,__lowerCamelCase : int = 2 ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer a = [ # downsampling is done in the first layer with stride of 2 layer(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,stride=__lowerCamelCase ,name='''layers.0''' ), *[layer(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,name=F"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : int ): '''simple docstring''' for layer_module in self.layers: a = layer_module(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] ,__lowerCamelCase : RegNetConfig ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( __lowerCamelCase ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name='''stages.0''' ,) ) a = zip(config.hidden_sizes ,config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in 
enumerate(zip(__lowerCamelCase ,config.depths[1:] ) ): self.stages.append(TFRegNetStage(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,depth=__lowerCamelCase ,name=F"""stages.{i+1}""" ) ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : bool = False ,__lowerCamelCase : bool = True ): '''simple docstring''' a = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: a = hidden_states + (hidden_state,) a = stage_module(__lowerCamelCase ) if output_hidden_states: a = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=__lowerCamelCase ,hidden_states=__lowerCamelCase ) @keras_serializable class lowerCamelCase_ ( tf.keras.layers.Layer ): SCREAMING_SNAKE_CASE_ = RegNetConfig def __init__( self : Dict ,__lowerCamelCase : Optional[int] ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = config a = TFRegNetEmbeddings(__lowerCamelCase ,name='''embedder''' ) a = TFRegNetEncoder(__lowerCamelCase ,name='''encoder''' ) a = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowerCamelCase ,name='''pooler''' ) @unpack_inputs def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : bool = False ,): '''simple docstring''' a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a = return_dict if return_dict is not None else self.config.use_return_dict a = self.embedder(__lowerCamelCase ,training=__lowerCamelCase ) a = self.encoder( __lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase ,training=__lowerCamelCase ) a = encoder_outputs[0] a = self.pooler(__lowerCamelCase ) # Change to NCHW output format have uniformity in the modules a = tf.transpose(__lowerCamelCase ,perm=(0, 3, 1, 2) ) a = tf.transpose(__lowerCamelCase ,perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: a = tuple([tf.transpose(__lowerCamelCase ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__lowerCamelCase ,pooler_output=__lowerCamelCase ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,) class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = RegNetConfig SCREAMING_SNAKE_CASE_ = 'regnet' SCREAMING_SNAKE_CASE_ = 'pixel_values' @property def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) ,dtype=tf.floataa )} UpperCamelCase__ : Union[str, Any] = R""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. 
""" UpperCamelCase__ : List[str] = R""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' , a_ , ) class lowerCamelCase_ ( a_ ): def __init__( self : Optional[int] ,__lowerCamelCase : RegNetConfig ,*__lowerCamelCase : int ,**__lowerCamelCase : Union[str, Any] ): '''simple docstring''' super().__init__(__lowerCamelCase ,*__lowerCamelCase ,**__lowerCamelCase ) a = TFRegNetMainLayer(__lowerCamelCase ,name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(__lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC ,output_type=__lowerCamelCase ,config_class=_CONFIG_FOR_DOC ,modality='''vision''' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : List[str]=False ,): '''simple docstring''' a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a = return_dict if return_dict is not None else self.config.use_return_dict a = self.regnet( pixel_values=__lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase ,training=__lowerCamelCase ,) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , a_ , ) class lowerCamelCase_ ( a_ , a_ ): def __init__( self : Optional[int] ,__lowerCamelCase : RegNetConfig ,*__lowerCamelCase : str ,**__lowerCamelCase : Any ): '''simple docstring''' super().__init__(__lowerCamelCase ,*__lowerCamelCase ,**__lowerCamelCase ) a = config.num_labels a = TFRegNetMainLayer(__lowerCamelCase ,name='''regnet''' ) # classification head a = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels ,name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(__lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=__lowerCamelCase ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,) def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : tf.Tensor = None ,__lowerCamelCase : tf.Tensor = None ,__lowerCamelCase : bool = None ,__lowerCamelCase : bool = None ,__lowerCamelCase : Dict=False ,): '''simple docstring''' a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a = return_dict if return_dict is not None else self.config.use_return_dict a = self.regnet( __lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase ,training=__lowerCamelCase ) a = outputs.pooler_output if return_dict else outputs[1] a = self.classifier[0](__lowerCamelCase ) a = self.classifier[1](__lowerCamelCase ) a = None if labels is None else self.hf_compute_loss(labels=__lowerCamelCase ,logits=__lowerCamelCase ) if not return_dict: a = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=__lowerCamelCase ,logits=__lowerCamelCase ,hidden_states=outputs.hidden_states )
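A minimal sketch of the layout juggling done above, added as an illustration and not part of the original file: Hugging Face passes images channels-first (NCHW), while Keras `Conv2D` on CPU expects channels-last (NHWC), so the embedder transposes inputs with `perm=(0, 2, 3, 1)` and the main layer transposes outputs back with `perm=(0, 3, 1, 2)`.

import tensorflow as tf

x = tf.zeros((2, 3, 224, 224))                    # NCHW: (batch, channels, height, width)
x_nhwc = tf.transpose(x, perm=(0, 2, 3, 1))       # -> (2, 224, 224, 3)
x_nchw = tf.transpose(x_nhwc, perm=(0, 3, 1, 2))  # back to (2, 3, 224, 224)
assert x_nchw.shape == x.shape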
330
0
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test: return True iff 2**p - 1 is a Mersenne prime."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1  # the Mersenne number 2**p - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
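A worked trace, added as an illustration (not part of the original file): for p = 5 we have m = 2**5 - 1 = 31 and p - 2 = 3 squarings, so s goes 4 -> (4*4 - 2) % 31 = 14 -> (14*14 - 2) % 31 = 8 -> (8*8 - 2) % 31 = 0, and 31 is reported prime.

assert lucas_lehmer_test(5) is True
assert lucas_lehmer_test(11) is False  # 2**11 - 1 = 2047 = 23 * 89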
287
"""simple docstring""" import math def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if initial_intensity < 0: raise ValueError('''The value of intensity cannot be negative''' ) # handling of negative values of initial intensity if angle < 0 or angle > 3_60: raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' ) # handling of values out of allowed range return initial_intensity * (math.cos(math.radians(__UpperCamelCase ) ) ** 2) if __name__ == "__main__": import doctest doctest.testmod(name='malus_law')
249
0
import datasets from .evaluate import evaluate UpperCamelCase_ = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n" UpperCamelCase_ = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n" UpperCamelCase_ = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a_ ( datasets.Metric ): def __a ( self :Optional[int]) -> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': {'''id''': datasets.Value('''string'''), '''prediction_text''': datasets.Value('''string''')}, '''references''': { '''id''': datasets.Value('''string'''), '''answers''': datasets.features.Sequence( { '''text''': datasets.Value('''string'''), '''answer_start''': datasets.Value('''int32'''), }), }, }) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , ) def __a ( self :str , _lowercase :Dict , _lowercase :Optional[int]) -> Dict: UpperCAmelCase_ = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions} UpperCAmelCase_ = [ { '''paragraphs''': [ { '''qas''': [ { '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']], '''id''': ref['''id'''], } for ref in references ] } ] } ] UpperCAmelCase_ = evaluate(dataset=_lowercase , predictions=_lowercase) return score
344
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
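Two usage examples, added as an illustration (not part of the original file); the memoised recursion above computes the classic Levenshtein distance with unit costs for insertion, deletion, and substitution.

assert min_distance_up_bottom("kitten", "sitting") == 3  # k->s, e->i, append 'g'
assert min_distance_up_bottom("", "abc") == 3            # three insertions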
344
1
'''simple docstring''' import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a_ : str = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp a_ : Optional[Any] = 5 a_ : str = 10 @require_sentencepiece @require_tokenizers class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : int =SpeechaTextTokenizer lowercase : int =False lowercase : List[str] =True def lowercase__ ( self ): """simple docstring""" super().setUp() lowerCamelCase_ =sp.SentencePieceProcessor() spm_model.Load(lowerCAmelCase ) lowerCamelCase_ =['''<s>''', '''<pad>''', '''</s>''', '''<unk>'''] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(lowerCAmelCase ) )] lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) ) lowerCamelCase_ =Path(self.tmpdirname ) save_json(lowerCAmelCase, save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(lowerCAmelCase, save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''<pad>''' lowerCamelCase_ =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ), lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ), lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], '''<s>''' ) self.assertEqual(vocab_keys[1], '''<pad>''' ) self.assertEqual(vocab_keys[-1], '''j''' ) self.assertEqual(len(lowerCAmelCase ), 1_001 ) def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size, 1_001 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) lowerCamelCase_ =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCAmelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase ), [289, 50, 14, 174, 386], ) lowerCamelCase_ =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCAmelCase, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''], ) lowerCamelCase_ =tokenizer.convert_tokens_to_ids(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) lowerCamelCase_ =tokenizer.convert_ids_to_tokens(lowerCAmelCase ) self.assertListEqual( lowerCAmelCase, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + 
'''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''], ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ={'''input_ids''': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase, model_name='''facebook/s2t-small-mustc-en-de-st''', revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''', ) @require_sentencepiece class __UpperCamelCase ( unittest.TestCase ): lowercase : Tuple ='valhalla/s2t_mustc_multilinguial_medium' lowercase : Dict ='C\'est trop cool' lowercase : str ='Esto es genial' @classmethod def lowercase__ ( cls ): """simple docstring""" lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''], 4 ) self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''], 6 ) self.assertEqual(self.tokenizer.lang_code_to_id['''it'''], 9 ) self.assertEqual(self.tokenizer.lang_code_to_id['''de'''], 11 ) def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.tokenizer.vocab_size, 10_000 ) def lowercase__ ( self ): """simple docstring""" self.assertIn(lowerCAmelCase, self.tokenizer.all_special_ids ) lowerCamelCase_ =[ES_CODE, 4, 1_601, 47, 7_647, 2] lowerCamelCase_ =self.tokenizer.decode(lowerCAmelCase, skip_special_tokens=lowerCAmelCase ) lowerCamelCase_ =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCAmelCase ) self.assertEqual(lowerCAmelCase, lowerCAmelCase ) self.assertNotIn(self.tokenizer.eos_token, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''fr''' lowerCamelCase_ =self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0], lowerCAmelCase ) self.assertEqual(encoded[-1], self.tokenizer.eos_token_id ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''fr''' self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE] ) lowerCamelCase_ ='''es''' self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE] )
75
from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract __lowerCAmelCase : List[str] = logging.get_logger(__name__) def __magic_name__ ( A : Dict, A : int, A : Optional[int] ): '''simple docstring''' return [ int(1000 * (box[0] / width) ), int(1000 * (box[1] / height) ), int(1000 * (box[2] / width) ), int(1000 * (box[3] / height) ), ] def __magic_name__ ( A : np.ndarray, A : Optional[str], A : Optional[str] = None ): '''simple docstring''' a = tesseract_config if tesseract_config is not None else "" # apply OCR a = to_pil_image(A ) a , a = pil_image.size a = pytesseract.image_to_data(A, lang=A, output_type="dict", config=A ) a , a , a , a , a = data["text"], data["left"], data["top"], data["width"], data["height"] # filter empty words and corresponding coordinates a = [idx for idx, word in enumerate(A ) if not word.strip()] a = [word for idx, word in enumerate(A ) if idx not in irrelevant_indices] a = [coord for idx, coord in enumerate(A ) if idx not in irrelevant_indices] a = [coord for idx, coord in enumerate(A ) if idx not in irrelevant_indices] a = [coord for idx, coord in enumerate(A ) if idx not in irrelevant_indices] a = [coord for idx, coord in enumerate(A ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format a = [] for x, y, w, h in zip(A, A, A, A ): a = [x, y, x + w, y + h] actual_boxes.append(A ) # finally, normalize the bounding boxes a = [] for box in actual_boxes: normalized_boxes.append(normalize_box(A, A, A ) ) assert len(A ) == len(A ), "Not as many words as there are bounding boxes" return words, normalized_boxes class snake_case__ (_UpperCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = ["""pixel_values"""] def __init__( self : int , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = "" , **__lowerCamelCase : Tuple , ) -> None: super().__init__(**__lowerCamelCase ) a = size if size is not None else {"height": 2_24, "width": 2_24} a = get_size_dict(__lowerCamelCase ) a = do_resize a = size a = resample a = apply_ocr a = ocr_lang a = tesseract_config def __UpperCAmelCase ( self : Dict , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Optional[int] , ) -> np.ndarray: a = get_size_dict(__lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}""" ) a = (size["height"], size["width"]) return resize(__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : ImageInput , __lowerCamelCase : bool = None , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : PILImageResampling = None , __lowerCamelCase : bool = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCamelCase : Optional[Any] , ) -> PIL.Image.Image: a = do_resize if do_resize is not None else self.do_resize a = size if size is not None else self.size a = get_size_dict(__lowerCamelCase ) a = resample if resample is not None else self.resample a = apply_ocr if apply_ocr is not None else self.apply_ocr a = ocr_lang if ocr_lang is not None else self.ocr_lang a = tesseract_config if tesseract_config is not None else self.tesseract_config a = make_list_of_images(__lowerCamelCase ) if not valid_images(__lowerCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) # All transformations expect numpy arrays. a = [to_numpy_array(__lowerCamelCase ) for image in images] if apply_ocr: requires_backends(self , "pytesseract" ) a = [] a = [] for image in images: a , a = apply_tesseract(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) words_batch.append(__lowerCamelCase ) boxes_batch.append(__lowerCamelCase ) if do_resize: a = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) a = [flip_channel_order(__lowerCamelCase ) for image in images] a = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images] a = BatchFeature(data={"pixel_values": images} , tensor_type=__lowerCamelCase ) if apply_ocr: a = words_batch a = boxes_batch return data
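An illustration of the box rescaling, added here and not part of the original file; it assumes the helper keeps its upstream name `normalize_box`, which is how the call sites in this file refer to it. Boxes are mapped onto the 0-1000 grid that LayoutLM-style models expect.

# (left, top, right, bottom) = (50, 100, 150, 200) on a 1000 x 2000 page:
assert normalize_box([50, 100, 150, 200], 1000, 2000) == [50, 50, 150, 100]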
107
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule lowercase__ = {'tokenization_bertweet': ['BertweetTokenizer']} if TYPE_CHECKING: from .tokenization_bertweet import BertweetTokenizer else: import sys lowercase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
203
"""simple docstring""" import unittest from knapsack import knapsack as k class __snake_case ( unittest.TestCase ): def lowerCamelCase_ ( self) -> Dict: '''simple docstring''' a__: List[Any] = 0 a__: Dict = [0] a__: int = [0] a__: Optional[Any] = len(lowercase) self.assertEqual(k.knapsack(lowercase , lowercase , lowercase , lowercase) , 0) a__: str = [60] a__: Dict = [10] a__: List[str] = len(lowercase) self.assertEqual(k.knapsack(lowercase , lowercase , lowercase , lowercase) , 0) def lowerCamelCase_ ( self) -> List[Any]: '''simple docstring''' a__: int = 3 a__: str = [1, 2, 3] a__: Dict = [3, 2, 1] a__: Optional[int] = len(lowercase) self.assertEqual(k.knapsack(lowercase , lowercase , lowercase , lowercase) , 5) def lowerCamelCase_ ( self) -> Union[str, Any]: '''simple docstring''' a__: Any = 50 a__: Optional[int] = [60, 1_00, 1_20] a__: str = [10, 20, 30] a__: int = len(lowercase) self.assertEqual(k.knapsack(lowercase , lowercase , lowercase , lowercase) , 2_20) if __name__ == "__main__": unittest.main()
203
1
'''simple docstring''' import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort _snake_case = logging.get_logger(__name__) _snake_case = { 'tensor(bool)': np.bool_, 'tensor(int8)': np.inta, 'tensor(uint8)': np.uinta, 'tensor(int16)': np.intaa, 'tensor(uint16)': np.uintaa, 'tensor(int32)': np.intaa, 'tensor(uint32)': np.uintaa, 'tensor(int64)': np.intaa, 'tensor(uint64)': np.uintaa, 'tensor(float16)': np.floataa, 'tensor(float)': np.floataa, 'tensor(double)': np.floataa, } class a__ : def __init__( self , _UpperCamelCase=None , **_UpperCamelCase ): """simple docstring""" logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." ) _lowercase : Optional[Any] = model _lowercase : Optional[Any] = kwargs.get("model_save_dir" , lowerCAmelCase_ ) _lowercase : Any = kwargs.get("latest_model_name" , lowerCAmelCase_ ) def __call__( self , **_UpperCamelCase ): """simple docstring""" _lowercase : Any = {k: np.array(lowerCAmelCase_ ) for k, v in kwargs.items()} return self.model.run(lowerCAmelCase_ , lowerCAmelCase_ ) @staticmethod def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ): """simple docstring""" if provider is None: logger.info("No onnxruntime provider specified, using CPUExecutionProvider" ) _lowercase : Tuple = "CPUExecutionProvider" return ort.InferenceSession(lowerCAmelCase_ , providers=[provider] , sess_options=lowerCAmelCase_ ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ): """simple docstring""" _lowercase : Optional[int] = file_name if file_name is not None else ONNX_WEIGHTS_NAME _lowercase : Any = self.model_save_dir.joinpath(self.latest_model_name ) _lowercase : Union[str, Any] = Path(lowerCAmelCase_ ).joinpath(lowerCAmelCase_ ) try: shutil.copyfile(lowerCAmelCase_ , lowerCAmelCase_ ) except shutil.SameFileError: pass # copy external weights (for models >2GB) _lowercase : List[Any] = self.model_save_dir.joinpath(lowerCAmelCase_ ) if src_path.exists(): _lowercase : Tuple = Path(lowerCAmelCase_ ).joinpath(lowerCAmelCase_ ) try: shutil.copyfile(lowerCAmelCase_ , lowerCAmelCase_ ) except shutil.SameFileError: pass def _lowerCamelCase ( self , _UpperCamelCase , **_UpperCamelCase , ): """simple docstring""" if os.path.isfile(lowerCAmelCase_ ): logger.error(f'''Provided path ({save_directory}) should be a directory, not a file''' ) return os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) # saving model weights/files self._save_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) @classmethod def _lowerCamelCase ( cls , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ): """simple docstring""" _lowercase : int = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(lowerCAmelCase_ ): _lowercase : Optional[int] = OnnxRuntimeModel.load_model( os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , provider=lowerCAmelCase_ , sess_options=lowerCAmelCase_ ) _lowercase : Dict = Path(lowerCAmelCase_ ) # load model from hub else: # download model _lowercase : Any = hf_hub_download( repo_id=lowerCAmelCase_ , filename=lowerCAmelCase_ , 
use_auth_token=lowerCAmelCase_ , revision=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , ) _lowercase : Union[str, Any] = Path(lowerCAmelCase_ ).parent _lowercase : Dict = Path(lowerCAmelCase_ ).name _lowercase : int = OnnxRuntimeModel.load_model(lowerCAmelCase_ , provider=lowerCAmelCase_ , sess_options=lowerCAmelCase_ ) return cls(model=lowerCAmelCase_ , **lowerCAmelCase_ ) @classmethod def _lowerCamelCase ( cls , _UpperCamelCase , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ): """simple docstring""" _lowercase : str = None if len(str(lowerCAmelCase_ ).split("@" ) ) == 2: _lowercase : Dict = model_id.split("@" ) return cls._from_pretrained( model_id=lowerCAmelCase_ , revision=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , **lowerCAmelCase_ , )
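A hedged usage sketch, added here and not part of the original file: assuming the upstream method names (`from_pretrained` and `__call__`, which the definitions above correspond to), a typical round trip loads an ONNX pipeline component and feeds it NumPy arrays. The repo id and the input name `sample` are hypothetical.

import numpy as np

model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model", provider="CPUExecutionProvider")
outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))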
250
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase_ : def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : List[Any]=10 , lowerCAmelCase_ : Any=[10, 20, 30, 40] , lowerCAmelCase_ : Any=[1, 1, 2, 1] , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : int="relu" , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Optional[int]=None , ) -> str: UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : int = batch_size UpperCAmelCase_ : str = image_size UpperCAmelCase_ : List[Any] = num_channels UpperCAmelCase_ : Tuple = embeddings_size UpperCAmelCase_ : Union[str, Any] = hidden_sizes UpperCAmelCase_ : int = depths UpperCAmelCase_ : Optional[Any] = is_training UpperCAmelCase_ : Dict = use_labels UpperCAmelCase_ : str = hidden_act UpperCAmelCase_ : str = num_labels UpperCAmelCase_ : str = scope UpperCAmelCase_ : str = len(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase_ : Optional[Any] = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict ) -> str: UpperCAmelCase_ : List[Any] = TFRegNetModel(config=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , training=lowerCAmelCase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = self.num_labels UpperCAmelCase_ : List[Any] = TFRegNetForImageClassification(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: UpperCAmelCase_ : Any = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , 
UpperCAmelCase_ : Dict = config_and_inputs UpperCAmelCase_ : List[str] = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ (__A , __A , unittest.TestCase ): __magic_name__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () __magic_name__ = ( {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification} if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = TFRegNetModelTester(self ) UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: return @unittest.skip(reason="RegNet does not use inputs_embeds" ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: super().test_keras_fit() @unittest.skip(reason="RegNet does not support input and output embeddings" ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: pass def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Dict = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : List[Any] = [*signature.parameters.keys()] UpperCAmelCase_ : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: def check_hidden_states_output(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ): UpperCAmelCase_ : str = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : Any = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) UpperCAmelCase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase_ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Tuple = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCAmelCase_ : List[Any] = layer_type UpperCAmelCase_ : int = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Optional[int] = True check_hidden_states_output(lowerCAmelCase_ , 
lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str]={} ): UpperCAmelCase_ : Tuple = model(lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ ).to_tuple() def recursive_check(lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any ): if isinstance(lowerCAmelCase_ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(lowerCAmelCase_ , lowerCAmelCase_ ): recursive_check(lowerCAmelCase_ , lowerCAmelCase_ ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(lowerCAmelCase_ , lowerCAmelCase_ ) ) , msg=( "Tuple and dict output are not equal. Difference:" f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(lowerCAmelCase_ , lowerCAmelCase_ ) for model_class in self.all_model_classes: UpperCAmelCase_ : Union[str, Any] = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Any = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {"output_hidden_states": True} ) UpperCAmelCase_ : List[str] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {"output_hidden_states": True} ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Any = TFRegNetModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def snake_case ( ): UpperCAmelCase_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class UpperCamelCase_ (unittest.TestCase ): @cached_property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: UpperCAmelCase_ : Any = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCAmelCase_ : Union[str, Any] = self.default_image_processor UpperCAmelCase_ : int = prepare_img() 
UpperCAmelCase_ : List[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="tf" ) # forward pass UpperCAmelCase_ : Tuple = model(**lowerCAmelCase_ , training=lowerCAmelCase_ ) # verify the logits UpperCAmelCase_ : List[str] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = tf.constant([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ) tf.debugging.assert_near(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 )
268
0
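The TFRegNet test above verifies that a model returns the same values whether it is called with return_dict=True or return_dict=False. A minimal, self-contained sketch of that recursive tuple/dict comparison, using NumPy arrays in place of TF tensors; the container shapes and names here are illustrative, not taken from the test:

import numpy as np

def recursive_check(tuple_object, dict_object):
    # Walk both output structures in lockstep and compare leaf tensors elementwise.
    if isinstance(tuple_object, (list, tuple)):
        for t, d in zip(tuple_object, dict_object):
            recursive_check(t, d)
    elif tuple_object is None:
        return
    else:
        assert np.all(np.equal(tuple_object, dict_object)), (
            f"Tuple and dict output differ by {np.max(np.abs(tuple_object - dict_object))}"
        )

# Hypothetical outputs standing in for model(..., return_dict=False) vs True.
tuple_out = (np.ones((2, 3)), (np.zeros(4), None))
dict_out = (np.ones((2, 3)), (np.zeros(4), None))
recursive_check(tuple_out, dict_out)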
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : int = logging.get_logger(__name__) _lowerCAmelCase : Union[str, Any] = torch.device("cpu") def __snake_case ( ) -> List[str]: '''simple docstring''' _UpperCAmelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg" _UpperCAmelCase : Dict = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ) return im def __snake_case ( SCREAMING_SNAKE_CASE__ : Tuple ) -> Dict: '''simple docstring''' if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1703E00, 2.1107E00, -2.0811E00, 8.8685E-01, 2.4360E-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9636E-01, 2.3478E-01, -1.6963E00, -1.7381E00, -8.6337E-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2768E-01, -4.7429E-01, -1.0897E00, -1.0248E00, 3.5523E-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5330E-01, 2.4211E-01, -6.0185E-01, -8.2789E-01, -6.0446E-02] ) def __snake_case ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ) -> int: '''simple docstring''' _UpperCAmelCase : List[str] = dct.pop(SCREAMING_SNAKE_CASE__ ) _UpperCAmelCase : Any = val def __snake_case ( SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase : int = [] for k in state_dict.keys(): _UpperCAmelCase : int = k if ".pwconv" in k: _UpperCAmelCase : List[str] = k_new.replace(".pwconv" , ".point_wise_conv" ) if ".dwconv" in k: _UpperCAmelCase : Union[str, Any] = k_new.replace(".dwconv" , ".depth_wise_conv" ) if ".Proj." in k: _UpperCAmelCase : Dict = k_new.replace(".Proj." , ".proj." ) if "patch_embed" in k_new: _UpperCAmelCase : int = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" ) if "network" in k_new: _UpperCAmelCase : Optional[int] = k_new.split("." ) if ls[2].isdigit(): _UpperCAmelCase : List[str] = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." 
+ ".".join(ls[3:] ) else: _UpperCAmelCase : str = k_new.replace("network" , "swiftformer.encoder.network" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def __snake_case ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ) -> List[str]: '''simple docstring''' _UpperCAmelCase : List[str] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size _UpperCAmelCase : Optional[int] = 1_000 _UpperCAmelCase : str = "huggingface/label-files" _UpperCAmelCase : List[Any] = "imagenet-1k-id2label.json" _UpperCAmelCase : List[str] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="dataset" ) , "r" ) ) _UpperCAmelCase : Union[str, Any] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()} _UpperCAmelCase : Optional[int] = idalabel _UpperCAmelCase : Any = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": _UpperCAmelCase : Tuple = [3, 3, 6, 4] _UpperCAmelCase : str = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": _UpperCAmelCase : Dict = [3, 3, 9, 6] _UpperCAmelCase : Union[str, Any] = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": _UpperCAmelCase : Tuple = [4, 3, 10, 5] _UpperCAmelCase : List[Any] = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": _UpperCAmelCase : Tuple = [4, 4, 12, 6] _UpperCAmelCase : Dict = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("https" ): _UpperCAmelCase : str = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location="cpu" , check_hash=SCREAMING_SNAKE_CASE__ ) else: _UpperCAmelCase : Any = torch.load(SCREAMING_SNAKE_CASE__ , map_location="cpu" ) _UpperCAmelCase : str = checkpoint _UpperCAmelCase : List[Any] = create_rename_keys(SCREAMING_SNAKE_CASE__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # load HuggingFace model _UpperCAmelCase : Union[str, Any] = SwiftFormerForImageClassification(SCREAMING_SNAKE_CASE__ ).eval() hf_model.load_state_dict(SCREAMING_SNAKE_CASE__ ) # prepare test inputs _UpperCAmelCase : Tuple = prepare_img() _UpperCAmelCase : List[Any] = ViTImageProcessor.from_pretrained("preprocessor_config" ) _UpperCAmelCase : Optional[Any] = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="pt" ) # compare outputs from both models _UpperCAmelCase : List[Any] = get_expected_output(SCREAMING_SNAKE_CASE__ ) _UpperCAmelCase : str = hf_model(inputs["pixel_values"] ).logits assert hf_logits.shape == torch.Size([1, 1_000] ) assert torch.allclose(hf_logits[0, 0:5] , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) print(f'Saving model {swiftformer_name} to {pytorch_dump_folder_path}' ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": _lowerCAmelCase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swiftformer_name", default="swiftformer_xs", choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"], type=str, help="Name of the SwiftFormer model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="./converted_outputs/", type=str, help="Path to the output PyTorch model directory.", ) 
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.") _lowerCAmelCase : Optional[int] = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
202
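The SwiftFormer converter above works by popping old checkpoint keys and re-inserting their values under HuggingFace-style names. A reduced sketch of that pattern; the keys and target names below are invented for illustration and are not the converter's real mapping:

def rename_key(state_dict, old, new):
    # Move a tensor from its original key to the converted key.
    state_dict[new] = state_dict.pop(old)

state_dict = {"patch_embed.0.weight": 1, "network.0.1.pwconv1.weight": 2}
renames = [
    ("patch_embed.0.weight", "swiftformer.patch_embed.patch_embedding.0.weight"),
    ("network.0.1.pwconv1.weight",
     "swiftformer.encoder.network.0.blocks.1.point_wise_conv1.weight"),
]
for old, new in renames:
    rename_key(state_dict, old, new)
print(sorted(state_dict))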
"""simple docstring""" import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def __snake_case ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str]=7 ) -> Tuple: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = None if token is not None: _UpperCAmelCase : str = {"Accept": "application/vnd.github+json", "Authorization": f'Bearer {token}'} # The id of a workflow (not of a workflow run) _UpperCAmelCase : Any = "636036" _UpperCAmelCase : Dict = f'https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs' # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += f'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}' _UpperCAmelCase : Tuple = requests.get(SCREAMING_SNAKE_CASE__ , headers=SCREAMING_SNAKE_CASE__ ).json() return result["workflow_runs"] def __snake_case ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase : int = get_daily_ci_runs(SCREAMING_SNAKE_CASE__ ) _UpperCAmelCase : Optional[int] = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": _UpperCAmelCase : str = workflow_run["id"] break return workflow_run_id def __snake_case ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[str]: '''simple docstring''' _UpperCAmelCase : List[str] = get_last_daily_ci_runs(SCREAMING_SNAKE_CASE__ ) if workflow_run_id is not None: _UpperCAmelCase : Any = get_artifacts_links(worflow_run_id=SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ ) for artifact_name in artifact_names: if artifact_name in artifacts_links: _UpperCAmelCase : List[str] = artifacts_links[artifact_name] download_artifact( artifact_name=SCREAMING_SNAKE_CASE__ , artifact_url=SCREAMING_SNAKE_CASE__ , output_dir=SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ ) def __snake_case ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' get_last_daily_ci_artifacts(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) _UpperCAmelCase : Any = {} for artifact_name in artifact_names: _UpperCAmelCase : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , f'{artifact_name}.zip' ) if os.path.isfile(SCREAMING_SNAKE_CASE__ ): _UpperCAmelCase : str = {} with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ ) as z: for filename in z.namelist(): if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): # read the file with z.open(SCREAMING_SNAKE_CASE__ ) as f: _UpperCAmelCase : List[str] = f.read().decode("UTF-8" ) return results
202
1
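The CI script above ends by unzipping each downloaded artifact and reading every file inside into a dict. A self-contained sketch of that loop, writing a tiny in-memory zip first; note it simplifies the directory check to a trailing-slash test, which differs from the os.path.isdir call used in the script:

import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as z:
    z.writestr("failures_line.txt", "error A\nerror B")

results = {}
with zipfile.ZipFile(buf) as z:
    for filename in z.namelist():
        if not filename.endswith("/"):  # skip directory entries
            with z.open(filename) as f:
                results[filename] = f.read().decode("UTF-8")
print(results)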
'''simple docstring''' import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class a_ : '''simple docstring''' def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> int: _SCREAMING_SNAKE_CASE = parent _SCREAMING_SNAKE_CASE = batch_size _SCREAMING_SNAKE_CASE = seq_length _SCREAMING_SNAKE_CASE = is_training _SCREAMING_SNAKE_CASE = use_input_mask _SCREAMING_SNAKE_CASE = use_token_type_ids _SCREAMING_SNAKE_CASE = use_labels _SCREAMING_SNAKE_CASE = vocab_size _SCREAMING_SNAKE_CASE = hidden_size _SCREAMING_SNAKE_CASE = num_hidden_layers _SCREAMING_SNAKE_CASE = num_attention_heads _SCREAMING_SNAKE_CASE = intermediate_size _SCREAMING_SNAKE_CASE = hidden_act _SCREAMING_SNAKE_CASE = hidden_dropout_prob _SCREAMING_SNAKE_CASE = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE = max_position_embeddings _SCREAMING_SNAKE_CASE = type_vocab_size _SCREAMING_SNAKE_CASE = type_sequence_label_size _SCREAMING_SNAKE_CASE = initializer_range _SCREAMING_SNAKE_CASE = num_labels _SCREAMING_SNAKE_CASE = num_choices _SCREAMING_SNAKE_CASE = scope def snake_case_( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE = None if self.use_input_mask: _SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) _SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None if self.use_labels: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) _SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case_( self ) -> Any: return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , ) def snake_case_( self , A , A , A , A , A , A , A ) -> str: _SCREAMING_SNAKE_CASE = NystromformerModel(config=A ) model.to(A ) model.eval() _SCREAMING_SNAKE_CASE = model(A , attention_mask=A , token_type_ids=A ) _SCREAMING_SNAKE_CASE 
= model(A , token_type_ids=A ) _SCREAMING_SNAKE_CASE = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case_( self , A , A , A , A , A , A , A ) -> str: _SCREAMING_SNAKE_CASE = NystromformerForMaskedLM(config=A ) model.to(A ) model.eval() _SCREAMING_SNAKE_CASE = model(A , attention_mask=A , token_type_ids=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case_( self , A , A , A , A , A , A , A ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = NystromformerForQuestionAnswering(config=A ) model.to(A ) model.eval() _SCREAMING_SNAKE_CASE = model( A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case_( self , A , A , A , A , A , A , A ) -> Any: _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = NystromformerForSequenceClassification(A ) model.to(A ) model.eval() _SCREAMING_SNAKE_CASE = model(A , attention_mask=A , token_type_ids=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case_( self , A , A , A , A , A , A , A ) -> str: _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = NystromformerForTokenClassification(config=A ) model.to(A ) model.eval() _SCREAMING_SNAKE_CASE = model(A , attention_mask=A , token_type_ids=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case_( self , A , A , A , A , A , A , A ) -> Optional[int]: _SCREAMING_SNAKE_CASE = self.num_choices _SCREAMING_SNAKE_CASE = NystromformerForMultipleChoice(config=A ) model.to(A ) model.eval() _SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _SCREAMING_SNAKE_CASE = model( A , attention_mask=A , token_type_ids=A , labels=A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def snake_case_( self ) -> Dict: _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ) = config_and_inputs _SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a_ ( snake_case_ , snake_case_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) UpperCamelCase = ( { '''feature-extraction''': NystromformerModel, '''fill-mask''': NystromformerForMaskedLM, '''question-answering''': NystromformerForQuestionAnswering, '''text-classification''': NystromformerForSequenceClassification, '''token-classification''': NystromformerForTokenClassification, '''zero-shot''': NystromformerForSequenceClassification, } if 
is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False def snake_case_( self ) -> Tuple: _SCREAMING_SNAKE_CASE = NystromformerModelTester(self ) _SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A , hidden_size=37 ) def snake_case_( self ) -> int: self.config_tester.run_common_tests() def snake_case_( self ) -> Dict: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def snake_case_( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _SCREAMING_SNAKE_CASE = type self.model_tester.create_and_check_model(*A ) def snake_case_( self ) -> Dict: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A ) def snake_case_( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*A ) def snake_case_( self ) -> Tuple: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A ) def snake_case_( self ) -> Dict: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A ) def snake_case_( self ) -> int: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A ) @slow def snake_case_( self ) -> int: for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained(A ) self.assertIsNotNone(A ) @require_torch class a_ ( unittest.TestCase ): '''simple docstring''' @slow def snake_case_( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" ) _SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): _SCREAMING_SNAKE_CASE = model(A )[0] _SCREAMING_SNAKE_CASE = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , A ) _SCREAMING_SNAKE_CASE = torch.tensor( [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , A , atol=1e-4 ) ) @slow def snake_case_( self ) -> int: _SCREAMING_SNAKE_CASE = """the [MASK] of Belgium is Brussels""" _SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" ) _SCREAMING_SNAKE_CASE = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" ) _SCREAMING_SNAKE_CASE = tokenizer(A , return_tensors="""pt""" ) with torch.no_grad(): _SCREAMING_SNAKE_CASE = model(encoding.input_ids ).logits _SCREAMING_SNAKE_CASE = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(A ) , """capital""" )
58
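One pattern worth isolating from the Nystromformer tester above is the multiple-choice input expansion: each sequence is repeated once per answer choice so the model scores all choices in a single batch. A small sketch, with arbitrary batch, sequence, and choice sizes:

import torch

batch_size, seq_length, num_choices = 2, 5, 4
input_ids = torch.randint(0, 99, (batch_size, seq_length))
multiple_choice_inputs = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
print(multiple_choice_inputs.shape)  # torch.Size([2, 4, 5])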
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=10 , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.02 , __UpperCAmelCase="divided_space_time" , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = image_size __lowerCamelCase = num_channels __lowerCamelCase = patch_size __lowerCamelCase = num_frames __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = attention_type __lowerCamelCase = initializer_range __lowerCamelCase = scope __lowerCamelCase = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __lowerCamelCase = (image_size // patch_size) ** 2 __lowerCamelCase = (num_frames) * self.num_patches_per_frame + 1 def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels ) __lowerCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __lowerCamelCase = self.num_labels return config def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TimesformerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TimesformerForVideoClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase ) # verify the logits shape __lowerCamelCase = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = config_and_inputs __lowerCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowerCAmelCase__ = ( {"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification} if is_torch_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerModelTester(self ) __lowerCamelCase = ConfigTester( self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = copy.deepcopy(__UpperCAmelCase ) if return_labels: if model_class in get_values(__UpperCAmelCase ): __lowerCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase ) return inputs_dict def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''TimeSformer does not use inputs_embeds''' ) def lowerCamelCase ( self ): '''simple docstring''' pass def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) __lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = 
TimesformerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' if not self.has_attentions: pass else: __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase = True for model_class in self.all_model_classes: __lowerCamelCase = self.model_tester.seq_length __lowerCamelCase = self.model_tester.num_frames __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __lowerCamelCase = len(__UpperCAmelCase ) # Check attention is always last and order is fine __lowerCamelCase = True __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) self.assertEqual(out_len + 1 , len(__UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def lowerCamelCase ( self ): '''simple docstring''' def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.hidden_states __lowerCamelCase = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) __lowerCamelCase = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def a__ ( ): __lowerCamelCase = hf_hub_download( 
repo_id='''hf-internal-testing/spaghetti-video''' ,filename='''eating_spaghetti.npy''' ,repo_type='''dataset''' ) __lowerCamelCase = np.load(_UpperCamelCase ) return list(_UpperCamelCase ) @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self ): '''simple docstring''' # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to( __UpperCAmelCase ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_video() __lowerCamelCase = image_processor(video[:8] , return_tensors='''pt''' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __lowerCamelCase = model(**__UpperCAmelCase ) # verify the logits __lowerCamelCase = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
330
0
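The TimeSformer tester above derives its sequence length as one CLS token plus (image_size // patch_size)**2 patches for each frame. A worked example using the tester's default sizes:

image_size, patch_size, num_frames = 10, 2, 2
num_patches_per_frame = (image_size // patch_size) ** 2  # 25 patches per frame
seq_length = num_frames * num_patches_per_frame + 1      # 51 tokens including CLS
print(num_patches_per_frame, seq_length)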
import argparse import os import re lowercase : Optional[Any] = 'src/transformers/models/auto' # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict lowercase : str = re.compile(R'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict') # re pattern that matches identifiers in mappings lowercase : str = re.compile(R'\s*\(\s*\"(\S[^\"]+)\"') def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] , _lowerCamelCase : Any = False) -> Dict: '''simple docstring''' with open(_lowerCAmelCase , "r" , encoding="utf-8") as f: __UpperCamelCase : Dict = f.read() __UpperCamelCase : Union[str, Any] = content.split("\n") __UpperCamelCase : Optional[int] = [] __UpperCamelCase : Union[str, Any] = 0 while line_idx < len(_lowerCAmelCase): if _re_intro_mapping.search(lines[line_idx]) is not None: __UpperCamelCase : Tuple = len(re.search(R"^(\s*)\S" , lines[line_idx]).groups()[0]) + 8 # Start of a new mapping! while not lines[line_idx].startswith(" " * indent + "("): new_lines.append(lines[line_idx]) line_idx += 1 __UpperCamelCase : str = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": __UpperCamelCase : List[Any] = line_idx while not lines[line_idx].startswith(" " * indent + ")"): line_idx += 1 blocks.append("\n".join(lines[start_idx : line_idx + 1])) else: blocks.append(lines[line_idx]) line_idx += 1 # Sort blocks by their identifiers __UpperCamelCase : str = sorted(_lowerCAmelCase , key=lambda _lowerCamelCase: _re_identifier.search(_lowerCAmelCase).groups()[0]) new_lines += blocks else: new_lines.append(lines[line_idx]) line_idx += 1 if overwrite: with open(_lowerCAmelCase , "w" , encoding="utf-8") as f: f.write("\n".join(_lowerCAmelCase)) elif "\n".join(_lowerCAmelCase) != content: return True def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str = False) -> Tuple: '''simple docstring''' __UpperCamelCase : List[Any] = [os.path.join(_lowerCAmelCase , _lowerCAmelCase) for f in os.listdir(_lowerCAmelCase) if f.endswith(".py")] __UpperCamelCase : Dict = [sort_auto_mapping(_lowerCAmelCase , overwrite=_lowerCAmelCase) for fname in fnames] if not overwrite and any(_lowerCAmelCase): __UpperCamelCase : Union[str, Any] = [f for f, d in zip(_lowerCAmelCase , _lowerCAmelCase) if d] raise ValueError( F'The following files have auto mappings that need sorting: {", ".join(_lowerCAmelCase)}. Run `make style` to fix' " this.") if __name__ == "__main__": lowercase : List[Any] = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') lowercase : Any = parser.parse_args() sort_all_auto_mappings(not args.check_only)
354
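The sorting script above orders each mapping block by the first quoted identifier matched by _re_identifier. A reduced, runnable sketch of that rule on two hand-written blocks:

import re

_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
blocks = ['    ("bert", "BertModel"),', '    ("albert", "AlbertModel"),']
blocks = sorted(blocks, key=lambda b: _re_identifier.search(b).groups()[0])
print(blocks)  # "albert" now sorts before "bert"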
import numpy as np


def _SCREAMING_SNAKE_CASE(_lowerCamelCase: np.ndarray) -> np.ndarray:
    """Hyperbolic tangent computed from its logistic form: tanh(x) = 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * _lowerCamelCase))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
151
0
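A quick sanity check that the logistic-form formula above agrees with NumPy's built-in tanh:

import numpy as np

v = np.array([-2.0, 0.0, 1.5])
assert np.allclose((2 / (1 + np.exp(-2 * v))) - 1, np.tanh(v))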
'''simple docstring''' import datasets from .evaluate import evaluate UpperCamelCase__ : int = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n' UpperCamelCase__ : Any = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n' UpperCamelCase__ : Optional[Any] = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): """simple docstring""" def UpperCAmelCase_ ( self ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )}, """references""": { """id""": datasets.Value("""string""" ), """answers""": datasets.features.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), }, } ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: A_ : Optional[Any] = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions} A_ : List[Any] = [ { """paragraphs""": [ { """qas""": [ { """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]], """id""": ref["""id"""], } for ref in references ] } ] } ] A_ : int = evaluate(dataset=_lowerCamelCase , predictions=_lowerCamelCase ) return score
344
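The SQuAD metric above delegates scoring to the official evaluate script. As a toy illustration of the exact-match half only, answers are normalized (lowercased, punctuation and articles stripped) before comparison; this sketch is a simplified stand-in, not the imported evaluate() itself:

import re
import string

def normalize_answer(s):
    # Lowercase, drop punctuation, drop articles, collapse whitespace.
    s = s.lower()
    s = "".join(ch for ch in s if ch not in set(string.punctuation))
    s = re.sub(r"\b(a|an|the)\b", " ", s)
    return " ".join(s.split())

print(normalize_answer("The Answer!") == normalize_answer("answer"))  # True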
'''simple docstring''' def UpperCAmelCase ( a_ , a_ ) -> Optional[int]: """simple docstring""" print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""" ) for i in range(a_ ): for j in range(a_ ): if dist[i][j] != float("""inf""" ): print(int(dist[i][j] ) , end="""\t""" ) else: print("""INF""" , end="""\t""" ) print() def UpperCAmelCase ( a_ , a_ ) -> Tuple: """simple docstring""" A_ : List[str] = [[float("""inf""" ) for _ in range(a_ )] for _ in range(a_ )] for i in range(a_ ): for j in range(a_ ): A_ : List[Any] = graph[i][j] # check vertex k against all other vertices (i, j) for k in range(a_ ): # looping through rows of graph array for i in range(a_ ): # looping through columns of graph array for j in range(a_ ): if ( dist[i][k] != float("""inf""" ) and dist[k][j] != float("""inf""" ) and dist[i][k] + dist[k][j] < dist[i][j] ): A_ : List[str] = dist[i][k] + dist[k][j] _print_dist(a_ , a_ ) return dist, v if __name__ == "__main__": UpperCamelCase__ : Tuple = int(input('Enter number of vertices: ')) UpperCamelCase__ : int = int(input('Enter number of edges: ')) UpperCamelCase__ : Dict = [[float('inf') for i in range(v)] for j in range(v)] for i in range(v): UpperCamelCase__ : Union[str, Any] = 0.0 # src and dst are indices that must be within the array size graph[e][v] # failure to follow this will result in an error for i in range(e): print('\nEdge ', i + 1) UpperCamelCase__ : Union[str, Any] = int(input('Enter source:')) UpperCamelCase__ : int = int(input('Enter destination:')) UpperCamelCase__ : Optional[Any] = float(input('Enter weight:')) UpperCamelCase__ : Any = weight floyd_warshall(graph, v) # Example Input # Enter number of vertices: 3 # Enter number of edges: 2 # # generated graph from vertex and edge inputs # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]] # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]] # specify source, destination and weight for edge #1 # Edge 1 # Enter source:1 # Enter destination:2 # Enter weight:2 # specify source, destination and weight for edge #2 # Edge 2 # Enter source:2 # Enter destination:1 # Enter weight:1 # # Expected Output from the vertice, edge and src, dst, weight inputs!! # 0 INF INF # INF 0 2 # INF 1 0
344
1
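A non-interactive sketch of the same triple-loop relaxation on a fixed 3-vertex graph, so the Floyd-Warshall step above can be run without the input() prompts; the edge weights are arbitrary:

INF = float("inf")
dist = [
    [0.0, 2.0, INF],
    [INF, 0.0, 3.0],
    [1.0, INF, 0.0],
]
v = 3
for k in range(v):           # intermediate vertex
    for i in range(v):       # source
        for j in range(v):   # destination
            if dist[i][k] + dist[k][j] < dist[i][j]:
                dist[i][j] = dist[i][k] + dist[k][j]
print(dist)  # dist[0][2] becomes 5.0 via vertex 1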
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class lowerCamelCase__ : '''simple docstring''' def __init__(self ,__lowerCamelCase ,__lowerCamelCase=13 ,__lowerCamelCase=7 ,__lowerCamelCase=True ,__lowerCamelCase=True ,__lowerCamelCase=True ,__lowerCamelCase=True ,__lowerCamelCase=99 ,__lowerCamelCase=32 ,__lowerCamelCase=2 ,__lowerCamelCase=4 ,__lowerCamelCase=37 ,__lowerCamelCase="gelu" ,__lowerCamelCase=0.1 ,__lowerCamelCase=0.1 ,__lowerCamelCase=5_12 ,__lowerCamelCase=16 ,__lowerCamelCase=2 ,__lowerCamelCase=0.02 ,__lowerCamelCase=3 ,__lowerCamelCase=4 ,__lowerCamelCase=None ,__lowerCamelCase=10_00 ,) -> List[str]: """simple docstring""" lowerCAmelCase__ : List[Any] = parent lowerCAmelCase__ : Dict = batch_size lowerCAmelCase__ : str = seq_length lowerCAmelCase__ : Any = is_training lowerCAmelCase__ : Optional[Any] = use_input_mask lowerCAmelCase__ : Optional[Any] = use_token_type_ids lowerCAmelCase__ : Tuple = use_labels lowerCAmelCase__ : Optional[int] = vocab_size lowerCAmelCase__ : Tuple = hidden_size lowerCAmelCase__ : Dict = num_hidden_layers lowerCAmelCase__ : Any = num_attention_heads lowerCAmelCase__ : Tuple = intermediate_size lowerCAmelCase__ : Optional[int] = hidden_act lowerCAmelCase__ : Optional[int] = hidden_dropout_prob lowerCAmelCase__ : Optional[Any] = attention_probs_dropout_prob lowerCAmelCase__ : Any = max_position_embeddings lowerCAmelCase__ : int = type_vocab_size lowerCAmelCase__ : Optional[Any] = type_sequence_label_size lowerCAmelCase__ : Dict = initializer_range lowerCAmelCase__ : Union[str, Any] = num_labels lowerCAmelCase__ : str = num_choices lowerCAmelCase__ : Dict = scope lowerCAmelCase__ : List[str] = range_bbox def lowerCAmelCase__ (self ) -> List[Any]: """simple docstring""" lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) # convert bbox to numpy since TF does not support item assignment lowerCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length, 4] ,self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowerCAmelCase__ : Optional[int] = bbox[i, j, 3] lowerCAmelCase__ : Optional[Any] = bbox[i, j, 1] lowerCAmelCase__ : List[Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: lowerCAmelCase__ : List[Any] = bbox[i, j, 2] lowerCAmelCase__ : Tuple = bbox[i, j, 0] lowerCAmelCase__ : Optional[int] = t lowerCAmelCase__ : Any = tf.convert_to_tensor(_snake_case ) lowerCAmelCase__ : Optional[Any] = None if self.use_input_mask: lowerCAmelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ : str = None if self.use_token_type_ids: lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) lowerCAmelCase__ : Any = None lowerCAmelCase__ : int = None lowerCAmelCase__ : Dict = None if self.use_labels: lowerCAmelCase__ : Dict = 
ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices ) lowerCAmelCase__ : Optional[Any] = LayoutLMConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Any: """simple docstring""" lowerCAmelCase__ : Tuple = TFLayoutLMModel(config=_snake_case ) lowerCAmelCase__ : Optional[Any] = model(_snake_case ,_snake_case ,attention_mask=_snake_case ,token_type_ids=_snake_case ) lowerCAmelCase__ : Any = model(_snake_case ,_snake_case ,token_type_ids=_snake_case ) lowerCAmelCase__ : Optional[int] = model(_snake_case ,_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> str: """simple docstring""" lowerCAmelCase__ : Optional[Any] = TFLayoutLMForMaskedLM(config=_snake_case ) lowerCAmelCase__ : int = model(_snake_case ,_snake_case ,attention_mask=_snake_case ,token_type_ids=_snake_case ,labels=_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> List[str]: """simple docstring""" lowerCAmelCase__ : Optional[Any] = self.num_labels lowerCAmelCase__ : Optional[Any] = TFLayoutLMForSequenceClassification(config=_snake_case ) lowerCAmelCase__ : Dict = model(_snake_case ,_snake_case ,attention_mask=_snake_case ,token_type_ids=_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> str: """simple docstring""" lowerCAmelCase__ : Tuple = self.num_labels lowerCAmelCase__ : List[str] = TFLayoutLMForTokenClassification(config=_snake_case ) lowerCAmelCase__ : Optional[int] = model(_snake_case ,_snake_case ,attention_mask=_snake_case ,token_type_ids=_snake_case ,labels=_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> List[str]: """simple docstring""" lowerCAmelCase__ : int = TFLayoutLMForQuestionAnswering(config=_snake_case ) lowerCAmelCase__ : 
Any = model(_snake_case ,_snake_case ,attention_mask=_snake_case ,token_type_ids=_snake_case ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def lowerCAmelCase__ (self ) -> Dict: """simple docstring""" lowerCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : Dict = config_and_inputs lowerCAmelCase__ : str = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_tf class lowerCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase): '''simple docstring''' snake_case_ =( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) snake_case_ =( { """feature-extraction""": TFLayoutLMModel, """fill-mask""": TFLayoutLMForMaskedLM, """text-classification""": TFLayoutLMForSequenceClassification, """token-classification""": TFLayoutLMForTokenClassification, """zero-shot""": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) snake_case_ =False snake_case_ =True snake_case_ =10 def lowerCAmelCase__ (self ) -> str: """simple docstring""" lowerCAmelCase__ : Optional[Any] = TFLayoutLMModelTester(self ) lowerCAmelCase__ : Optional[Any] = ConfigTester(self ,config_class=_snake_case ,hidden_size=37 ) def lowerCAmelCase__ (self ) -> Dict: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase__ (self ) -> Optional[Any]: """simple docstring""" lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def lowerCAmelCase__ (self ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_snake_case ) def lowerCAmelCase__ (self ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_snake_case ) def lowerCAmelCase__ (self ) -> int: """simple docstring""" lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_snake_case ) def lowerCAmelCase__ (self ) -> int: """simple docstring""" lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_snake_case ) @slow def lowerCAmelCase__ (self ) -> Dict: """simple docstring""" for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ : List[str] = TFLayoutLMModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) @unittest.skip('''Onnx compliancy broke with TF 2.10''' ) def lowerCAmelCase__ (self ) -> Optional[Any]: """simple docstring""" pass def lowerCAmelCase__ ( ): '''simple docstring''' lowerCAmelCase__ : str = 
tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]]) # noqa: E231 lowerCAmelCase__ : List[str] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],]) # noqa: E231 lowerCAmelCase__ : Any = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]]) # noqa: E231 lowerCAmelCase__ : str = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]) # noqa: E231 # these are sequence labels (i.e. at the token level) lowerCAmelCase__ : Optional[Any] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]]) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class lowerCamelCase__ ( unittest.TestCase): '''simple docstring''' @slow def lowerCAmelCase__ (self ) -> str: """simple docstring""" lowerCAmelCase__ : Optional[int] = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''' ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase__ : Any = model(input_ids=_snake_case ,bbox=_snake_case ,attention_mask=_snake_case ,token_type_ids=_snake_case ) # test the sequence output on [0, :3, :3] lowerCAmelCase__ : int = tf.convert_to_tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] ,) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] ,_snake_case ,atol=1e-3 ) ) # test the pooled output on [1, :3] lowerCAmelCase__ : int = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] ,_snake_case ,atol=1e-3 ) ) @slow def lowerCAmelCase__ (self ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ : Dict = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' ,num_labels=2 ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase__ : Tuple = model( input_ids=_snake_case ,bbox=_snake_case ,attention_mask=_snake_case ,token_type_ids=_snake_case ,labels=tf.convert_to_tensor([1, 1] ) ,) # test whether we get a loss as 
a scalar lowerCAmelCase__ : Optional[Any] = outputs.loss lowerCAmelCase__ : List[str] = (2,) self.assertEqual(loss.shape ,_snake_case ) # test the shape of the logits lowerCAmelCase__ : int = outputs.logits lowerCAmelCase__ : Optional[int] = (2, 2) self.assertEqual(logits.shape ,_snake_case ) @slow def lowerCAmelCase__ (self ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ : Tuple = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' ,num_labels=13 ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase__ : int = model( input_ids=_snake_case ,bbox=_snake_case ,attention_mask=_snake_case ,token_type_ids=_snake_case ,labels=_snake_case ) # test the shape of the logits lowerCAmelCase__ : Optional[Any] = outputs.logits lowerCAmelCase__ : Optional[Any] = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape ,_snake_case ) @slow def lowerCAmelCase__ (self ) -> List[Any]: """simple docstring""" lowerCAmelCase__ : str = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''' ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase__ : Optional[Any] = model(input_ids=_snake_case ,bbox=_snake_case ,attention_mask=_snake_case ,token_type_ids=_snake_case ) # test the shape of the logits lowerCAmelCase__ : List[str] = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape ,_snake_case ) self.assertEqual(outputs.end_logits.shape ,_snake_case )
361
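A detail worth isolating from the TFLayoutLM tester above: randomly generated bounding boxes are made legal by swapping coordinates so that x0 <= x1 and y0 <= y1, as LayoutLM expects (x0, y0, x1, y1) boxes. A sketch of that fixup on a small random array; the shape and seed are arbitrary:

import numpy as np

rng = np.random.default_rng(0)
bbox = rng.integers(0, 1000, size=(2, 4, 4))
for i in range(bbox.shape[0]):
    for j in range(bbox.shape[1]):
        if bbox[i, j, 3] < bbox[i, j, 1]:  # swap y coordinates into order
            bbox[i, j, 1], bbox[i, j, 3] = bbox[i, j, 3], bbox[i, j, 1]
        if bbox[i, j, 2] < bbox[i, j, 0]:  # swap x coordinates into order
            bbox[i, j, 0], bbox[i, j, 2] = bbox[i, j, 2], bbox[i, j, 0]
assert (bbox[..., 3] >= bbox[..., 1]).all() and (bbox[..., 2] >= bbox[..., 0]).all()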
import fire from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoTokenizer from utils import SeqaSeqDataset, pickle_save def lowerCAmelCase__ ( lowerCamelCase_ : Dict ,lowerCamelCase_ : Optional[int] ,lowerCamelCase_ : List[Any]=1024 ,lowerCamelCase_ : int=1024 ,lowerCamelCase_ : Dict=False ,**lowerCamelCase_ : Tuple): '''simple docstring''' lowerCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(lowerCamelCase_) lowerCAmelCase__ : Optional[Any] = SeqaSeqDataset(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,type_path='''train''' ,**lowerCamelCase_) lowerCAmelCase__ : int = tok.pad_token_id def get_lens(lowerCamelCase_ : Tuple): lowerCAmelCase__ : Tuple = tqdm( DataLoader(lowerCamelCase_ ,batch_size=512 ,num_workers=8 ,shuffle=lowerCamelCase_ ,collate_fn=ds.collate_fn) ,desc=str(ds.len_file) ,) lowerCAmelCase__ : Tuple = [] for batch in dl: lowerCAmelCase__ : Dict = batch['''input_ids'''].ne(lowerCamelCase_).sum(1).tolist() lowerCAmelCase__ : Dict = batch['''labels'''].ne(lowerCamelCase_).sum(1).tolist() if consider_target: for src, tgt in zip(lowerCamelCase_ ,lowerCamelCase_): max_lens.append(max(lowerCamelCase_ ,lowerCamelCase_)) else: max_lens.extend(lowerCamelCase_) return max_lens lowerCAmelCase__ : str = get_lens(lowerCamelCase_) lowerCAmelCase__ : Tuple = SeqaSeqDataset(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,type_path='''val''' ,**lowerCamelCase_) lowerCAmelCase__ : Optional[int] = get_lens(lowerCamelCase_) pickle_save(lowerCamelCase_ ,train_ds.len_file) pickle_save(lowerCamelCase_ ,val_ds.len_file) if __name__ == "__main__": fire.Fire(save_len_file)
94
0
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin __snake_case = get_tests_dir("""fixtures/test_sentencepiece.model""") __snake_case = {"""target_lang""": """fi""", """source_lang""": """en"""} __snake_case = """>>zh<<""" __snake_case = """Helsinki-NLP/""" if is_torch_available(): __snake_case = """pt""" elif is_tf_available(): __snake_case = """tf""" else: __snake_case = """jax""" @require_sentencepiece class _lowerCAmelCase ( snake_case_ , unittest.TestCase ): __UpperCAmelCase : Tuple = MarianTokenizer __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : Any = True def lowerCamelCase ( self ) -> List[Any]: '''simple docstring''' super().setUp() snake_case : List[Any] = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] snake_case : List[str] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) snake_case : Optional[Any] = Path(self.tmpdirname ) save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["vocab"] ) save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["source_spm"] ) copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["target_spm"] ) snake_case : int = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase ( self , **UpperCamelCase__ ) -> MarianTokenizer: '''simple docstring''' return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def lowerCamelCase ( self , UpperCamelCase__ ) -> List[Any]: '''simple docstring''' return ( "This is a test", "This is a test", ) def lowerCamelCase ( self ) -> Tuple: '''simple docstring''' snake_case : List[str] = "</s>" snake_case : Tuple = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ ) def lowerCamelCase ( self ) -> Dict: '''simple docstring''' snake_case : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<pad>" ) self.assertEqual(len(UpperCamelCase__ ) , 9 ) def lowerCamelCase ( self ) -> Tuple: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def lowerCamelCase ( self ) -> str: '''simple docstring''' snake_case : str = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de' ) snake_case : Optional[int] = en_de_tokenizer(["I am a small frog"] , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) snake_case : Optional[Any] = [38, 121, 14, 697, 3_8848, 0] self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] ) snake_case : Optional[Any] = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(UpperCamelCase__ ) snake_case : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("*" )] self.assertIn("source.spm" , UpperCamelCase__ ) 
MarianTokenizer.from_pretrained(UpperCamelCase__ ) def lowerCamelCase ( self ) -> List[str]: '''simple docstring''' snake_case : Dict = self.get_tokenizer() snake_case : str = tok( ["I am a small frog" * 1000, "I am a small frog"] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(batch.input_ids.shape , (2, 512) ) def lowerCamelCase ( self ) -> List[str]: '''simple docstring''' snake_case : Any = self.get_tokenizer() snake_case : List[Any] = tok(["I am a tiny frog", "I am a small frog"] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def lowerCamelCase ( self ) -> Optional[int]: '''simple docstring''' snake_case : Dict = {"input_ids": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__ , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , ) def lowerCamelCase ( self ) -> List[str]: '''simple docstring''' snake_case : Optional[int] = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" ) snake_case : Tuple = "Tämä on testi" snake_case : Optional[int] = "This is a test" snake_case : Union[str, Any] = [76, 7, 2047, 2] snake_case : Optional[int] = [69, 12, 11, 940, 2] snake_case : List[str] = tokenizer(UpperCamelCase__ ).input_ids self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) snake_case : Any = tokenizer(text_target=UpperCamelCase__ ).input_ids self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) snake_case : List[Any] = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
203
"""simple docstring""" def __lowerCAmelCase ( lowercase : list[int] ) -> float: """simple docstring""" if not nums: # Makes sure that the list is not empty raise ValueError("List is empty" ) snake_case : List[str] = sum(lowercase ) / len(lowercase ) # Calculate the average return sum(abs(x - average ) for x in nums ) / len(lowercase ) if __name__ == "__main__": import doctest doctest.testmod()
203
1
import math from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { """facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""", # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class lowerCAmelCase ( __a ): __lowerCamelCase = """data2vec-audio""" def __init__( self :Optional[int] , _lowercase :int=32 , _lowercase :str=7_68 , _lowercase :Any=12 , _lowercase :List[Any]=12 , _lowercase :Optional[Any]=30_72 , _lowercase :Optional[Any]="gelu" , _lowercase :int=0.1 , _lowercase :Optional[Any]=0.1 , _lowercase :Any=0.1 , _lowercase :Any=0.0 , _lowercase :Union[str, Any]=0.1 , _lowercase :Dict=0.1 , _lowercase :List[Any]=0.02 , _lowercase :Dict=1e-5 , _lowercase :Tuple="gelu" , _lowercase :str=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , _lowercase :List[str]=(5, 2, 2, 2, 2, 2, 2) , _lowercase :Any=(10, 3, 3, 3, 3, 2, 2) , _lowercase :Optional[int]=False , _lowercase :Union[str, Any]=16 , _lowercase :List[str]=19 , _lowercase :Optional[Any]=5 , _lowercase :int=0.05 , _lowercase :List[Any]=10 , _lowercase :Optional[int]=2 , _lowercase :Optional[Any]=0.0 , _lowercase :List[Any]=10 , _lowercase :str=0 , _lowercase :Any="sum" , _lowercase :List[str]=False , _lowercase :int=False , _lowercase :Any=2_56 , _lowercase :List[str]=(5_12, 5_12, 5_12, 5_12, 15_00) , _lowercase :Union[str, Any]=(5, 3, 3, 1, 1) , _lowercase :Union[str, Any]=(1, 2, 3, 1, 1) , _lowercase :Tuple=5_12 , _lowercase :Optional[Any]=0 , _lowercase :Optional[int]=1 , _lowercase :Dict=2 , _lowercase :Any=False , _lowercase :List[Any]=3 , _lowercase :Optional[int]=2 , _lowercase :List[Any]=3 , _lowercase :int=None , **_lowercase :List[str] , ): '''simple docstring''' super().__init__(**a__ , pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ ) lowercase__ = hidden_size lowercase__ = feat_extract_activation lowercase__ = list(a__ ) lowercase__ = list(a__ ) lowercase__ = list(a__ ) lowercase__ = conv_bias lowercase__ = num_conv_pos_embeddings lowercase__ = num_conv_pos_embedding_groups lowercase__ = conv_pos_kernel_size lowercase__ = len(self.conv_dim ) lowercase__ = num_hidden_layers lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = num_attention_heads lowercase__ = hidden_dropout lowercase__ = attention_dropout lowercase__ = activation_dropout lowercase__ = feat_proj_dropout lowercase__ = final_dropout lowercase__ = layerdrop lowercase__ = layer_norm_eps lowercase__ = initializer_range lowercase__ = vocab_size lowercase__ = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase__ = mask_time_prob lowercase__ = mask_time_length lowercase__ = mask_time_min_masks lowercase__ = mask_feature_prob lowercase__ = mask_feature_length lowercase__ = mask_feature_min_masks # ctc loss lowercase__ = ctc_loss_reduction lowercase__ = ctc_zero_infinity # adapter lowercase__ = add_adapter lowercase__ = adapter_kernel_size lowercase__ = adapter_stride lowercase__ = num_adapter_layers lowercase__ = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowercase__ = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowercase__ = list(a__ ) lowercase__ = list(a__ ) lowercase__ = list(a__ ) lowercase__ = xvector_output_dim @property def UpperCAmelCase ( self :Optional[int] ): '''simple docstring''' return math.prod(self.conv_stride )
360
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
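# Hedged sketch, not in the source: a toy concrete reader showing the intended
# subclassing pattern. Real readers (csv/json/parquet) build a Dataset through the
# library's builders; Dataset.from_dict is used here only to stay self-contained,
# and InMemoryListReader is an invented name.
class InMemoryListReader(AbstractDatasetReader):
    def read(self):
        rows = self.path_or_paths if isinstance(self.path_or_paths, list) else [self.path_or_paths]
        return Dataset.from_dict({"path": [str(p) for p in rows]})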
201
0
"""simple docstring""" import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _A : Dict = logging.get_logger(__name__) _A : List[Any] = { """google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""", """google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""", """google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""", } class a__ ( a_ ): __lowerCAmelCase = """owlvit_text_model""" def __init__( self , _a=49_408 , _a=512 , _a=2_048 , _a=12 , _a=8 , _a=16 , _a="quick_gelu" , _a=1E-5 , _a=0.0 , _a=0.0_2 , _a=1.0 , _a=0 , _a=49_406 , _a=49_407 , **_a , ): super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a ) lowercase : Any = vocab_size lowercase : Tuple = hidden_size lowercase : Optional[Any] = intermediate_size lowercase : Optional[int] = num_hidden_layers lowercase : Any = num_attention_heads lowercase : List[str] = max_position_embeddings lowercase : str = hidden_act lowercase : Optional[int] = layer_norm_eps lowercase : Optional[int] = attention_dropout lowercase : Optional[Any] = initializer_range lowercase : Optional[Any] = initializer_factor @classmethod def __magic_name__ ( cls , _a , **_a ): cls._set_token_in_kwargs(_a ) lowercase , lowercase : Dict = cls.get_config_dict(_a , **_a ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": lowercase : Any = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_a , **_a ) class a__ ( a_ ): __lowerCAmelCase = """owlvit_vision_model""" def __init__( self , _a=768 , _a=3_072 , _a=12 , _a=12 , _a=3 , _a=768 , _a=32 , _a="quick_gelu" , _a=1E-5 , _a=0.0 , _a=0.0_2 , _a=1.0 , **_a , ): super().__init__(**_a ) lowercase : int = hidden_size lowercase : Optional[int] = intermediate_size lowercase : Tuple = num_hidden_layers lowercase : str = num_attention_heads lowercase : Optional[Any] = num_channels lowercase : int = image_size lowercase : Optional[Any] = patch_size lowercase : Optional[int] = hidden_act lowercase : Union[str, Any] = layer_norm_eps lowercase : str = attention_dropout lowercase : Tuple = initializer_range lowercase : Any = initializer_factor @classmethod def __magic_name__ ( cls , _a , **_a ): cls._set_token_in_kwargs(_a ) lowercase , lowercase : Tuple = cls.get_config_dict(_a , **_a ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": lowercase : List[str] = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_a , **_a ) class a__ ( a_ ): __lowerCAmelCase = """owlvit""" __lowerCAmelCase = True def __init__( self , _a=None , _a=None , _a=512 , _a=2.6_5_9_2 , _a=True , **_a , ): super().__init__(**_a ) if text_config is None: lowercase : List[str] = {} logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." ) if vision_config is None: lowercase : Tuple = {} logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." ) lowercase : int = OwlViTTextConfig(**_a ) lowercase : Optional[Any] = OwlViTVisionConfig(**_a ) lowercase : Tuple = projection_dim lowercase : str = logit_scale_init_value lowercase : int = return_dict lowercase : Dict = 1.0 @classmethod def __magic_name__ ( cls , _a , **_a ): cls._set_token_in_kwargs(_a ) lowercase , lowercase : Any = cls.get_config_dict(_a , **_a ) if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_a , **_a ) @classmethod def __magic_name__ ( cls , _a , _a , **_a ): lowercase : Tuple = {} lowercase : List[Any] = text_config lowercase : Optional[Any] = vision_config return cls.from_dict(_a , **_a ) def __magic_name__ ( self ): lowercase : str = copy.deepcopy(self.__dict__ ) lowercase : Tuple = self.text_config.to_dict() lowercase : Union[str, Any] = self.vision_config.to_dict() lowercase : int = self.__class__.model_type return output class a__ ( a_ ): @property def __magic_name__ ( self ): return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("attention_mask", {0: "batch", 1: "sequence"}), ] ) @property def __magic_name__ ( self ): return OrderedDict( [ ("logits_per_image", {0: "batch"}), ("logits_per_text", {0: "batch"}), ("text_embeds", {0: "batch"}), ("image_embeds", {0: "batch"}), ] ) @property def __magic_name__ ( self ): return 1E-4 def __magic_name__ ( self , _a , _a = -1 , _a = -1 , _a = None , ): lowercase : List[str] = super().generate_dummy_inputs( processor.tokenizer , batch_size=_a , seq_length=_a , framework=_a ) lowercase : Optional[Any] = super().generate_dummy_inputs( processor.image_processor , batch_size=_a , framework=_a ) return {**text_input_dict, **image_input_dict} @property def __magic_name__ ( self ): return 14
202
"""simple docstring""" import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask _A : Optional[int] = logging.getLogger(__name__) class a__ ( a_ ): def __init__( self , _a=-1 ): # in NER datasets, the last column is usually reserved for NER label lowercase : List[str] = label_idx def __magic_name__ ( self , _a , _a ): if isinstance(_a , _a ): lowercase : Optional[Any] = mode.value lowercase : List[str] = os.path.join(_a , f"""{mode}.txt""" ) lowercase : str = 1 lowercase : Optional[int] = [] with open(_a , encoding="utf-8" ) as f: lowercase : List[Any] = [] lowercase : Optional[int] = [] for line in f: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=_a , labels=_a ) ) guid_index += 1 lowercase : int = [] lowercase : int = [] else: lowercase : Optional[Any] = line.split(" " ) words.append(splits[0] ) if len(_a ) > 1: labels.append(splits[self.label_idx].replace("\n" , "" ) ) else: # Examples could have no label for mode = "test" labels.append("O" ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=_a , labels=_a ) ) return examples def __magic_name__ ( self , _a , _a , _a ): lowercase : List[str] = 0 for line in test_input_reader: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": writer.write(_a ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: lowercase : Any = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n" writer.write(_a ) else: logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] ) def __magic_name__ ( self , _a ): if path: with open(_a , "r" ) as f: lowercase : Optional[Any] = f.read().splitlines() if "O" not in labels: lowercase : List[Any] = ["O"] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class a__ ( a_ ): def __init__( self ): # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2 ) def __magic_name__ ( self , _a ): if path: with open(_a , "r" ) as f: lowercase : Tuple = f.read().splitlines() if "O" not in labels: lowercase : Optional[int] = ["O"] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class a__ ( a_ ): def __magic_name__ ( self , _a , _a ): if isinstance(_a , _a ): lowercase : List[Any] = mode.value lowercase : Optional[int] = os.path.join(_a , f"""{mode}.txt""" ) lowercase : Tuple = 1 lowercase : List[str] = [] with open(_a , encoding="utf-8" ) as f: for sentence in parse_incr(_a ): lowercase : Optional[Any] = [] lowercase : str = [] for token in sentence: words.append(token["form"] ) labels.append(token["upos"] ) assert len(_a ) == len(_a ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=_a , labels=_a ) ) guid_index += 1 return examples def __magic_name__ ( self , _a , _a , _a ): lowercase : str = 0 for sentence in parse_incr(_a ): lowercase : List[Any] = preds_list[example_id] lowercase : List[str] = "" for token in sentence: out += f"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """ out += "\n" writer.write(_a ) example_id += 1 def __magic_name__ ( self , _a ): if path: with open(_a , "r" ) as f: return f.read().splitlines() else: 
return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
202
1
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlnet import XLNetTokenizer else: lowercase : Optional[Any] = None lowercase : List[str] = logging.get_logger(__name__) lowercase : Optional[int] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} lowercase : Tuple = { "vocab_file": { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model", }, "tokenizer_file": { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json", }, } lowercase : Any = { "xlnet-base-cased": None, "xlnet-large-cased": None, } lowercase : Optional[int] = "▁" # Segments (not really needed) lowercase : List[Any] = 0 lowercase : Dict = 1 lowercase : Tuple = 2 lowercase : str = 3 lowercase : Optional[Any] = 4 class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ): """simple docstring""" lowercase : Tuple = VOCAB_FILES_NAMES lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : Dict = 'left' lowercase : Union[str, Any] = XLNetTokenizer def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<sep>" , __UpperCamelCase="<pad>" , __UpperCamelCase="<cls>" , __UpperCamelCase="<mask>" , __UpperCamelCase=["<eop>", "<eod>"] , **__UpperCamelCase , ) -> List[str]: '''simple docstring''' __UpperCamelCase : Optional[Any] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token super().__init__( vocab_file=lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , ) __UpperCamelCase : Dict = 3 __UpperCamelCase : Tuple = do_lower_case __UpperCamelCase : Dict = remove_space __UpperCamelCase : str = keep_accents __UpperCamelCase : List[Any] = vocab_file __UpperCamelCase : Dict = False if not self.vocab_file else True def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]: '''simple docstring''' __UpperCamelCase : Union[str, Any] = [self.sep_token_id] __UpperCamelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]: '''simple docstring''' __UpperCamelCase : Tuple = [self.sep_token_id] __UpperCamelCase : Any = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer 
does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(lowercase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCamelCase : str = os.path.join( lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ): copyfile(self.vocab_file , lowercase_ ) return (out_vocab_file,)
369
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
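# Hedged usage sketch, not from the source: instantiate the config above with
# defaults and override one character-level hyperparameter.
config = CanineConfig(downsampling_rate=2)
assert config.hidden_size == 768 and config.downsampling_rate == 2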
171
0
"""simple docstring""" import os try: from .build_directory_md import good_file_paths except ImportError: from build_directory_md import good_file_paths # type: ignore lowercase_ = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" lowercase_ = [file for file in filepaths if file != file.lower()] if upper_files: print(F'''{len(upper_files)} files contain uppercase characters:''') print("\n".join(upper_files) + "\n") lowercase_ = [file for file in filepaths if " " in file] if space_files: print(F'''{len(space_files)} files contain space characters:''') print("\n".join(space_files) + "\n") lowercase_ = [file for file in filepaths if "-" in file] if hyphen_files: print(F'''{len(hyphen_files)} files contain hyphen characters:''') print("\n".join(hyphen_files) + "\n") lowercase_ = [file for file in filepaths if os.sep not in file] if nodir_files: print(F'''{len(nodir_files)} files are not in a directory:''') print("\n".join(nodir_files) + "\n") lowercase_ = len(upper_files + space_files + hyphen_files + nodir_files) if bad_files: import sys sys.exit(bad_files)
45
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
151
0
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
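# Hedged harness, not part of the original file: a minimal node class providing
# the .val/.next attributes the three checks above rely on, plus two spot checks.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    head = None
    for v in reversed(values):
        node = ListNode(v)
        node.next = head
        head = node
    return head


assert is_palindrome_stack(build_list([1, 2, 2, 1])) is True
assert is_palindrome_dict(build_list([1, 2, 3])) is False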
367
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
46
0
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} __SCREAMING_SNAKE_CASE : Dict = { """vocab_file""": { """distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""", """distilbert-base-uncased-distilled-squad""": ( """https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt""" ), """distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""", """distilbert-base-cased-distilled-squad""": ( """https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt""" ), """distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""", """distilbert-base-multilingual-cased""": ( """https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""", """distilbert-base-uncased-distilled-squad""": ( """https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json""" ), """distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""", """distilbert-base-cased-distilled-squad""": ( """https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json""" ), """distilbert-base-german-cased""": ( """https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json""" ), """distilbert-base-multilingual-cased""": ( """https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json""" ), }, } __SCREAMING_SNAKE_CASE : Optional[Any] = { """distilbert-base-uncased""": 512, """distilbert-base-uncased-distilled-squad""": 512, """distilbert-base-cased""": 512, """distilbert-base-cased-distilled-squad""": 512, """distilbert-base-german-cased""": 512, """distilbert-base-multilingual-cased""": 512, } __SCREAMING_SNAKE_CASE : List[Any] = { """distilbert-base-uncased""": {"""do_lower_case""": True}, """distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True}, """distilbert-base-cased""": {"""do_lower_case""": False}, """distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False}, """distilbert-base-german-cased""": {"""do_lower_case""": False}, """distilbert-base-multilingual-cased""": {"""do_lower_case""": False}, } class lowerCamelCase_ (snake_case__ ): '''simple docstring''' __UpperCamelCase: Union[str, Any] = VOCAB_FILES_NAMES __UpperCamelCase: str = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase: Any = PRETRAINED_INIT_CONFIGURATION __UpperCamelCase: str = ["input_ids", "attention_mask"] __UpperCamelCase: List[str] = DistilBertTokenizer def __init__( self : str , A : int=None , A : Tuple=None , A : Tuple=True , A : Dict="[UNK]" , A : List[Any]="[SEP]" , A : Optional[Any]="[PAD]" , A : Dict="[CLS]" , A : Tuple="[MASK]" , A : str=True , A : Dict=None , **A : List[Any] , ): super().__init__( A , tokenizer_file=A , do_lower_case=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , 
mask_token=A , tokenize_chinese_chars=A , strip_accents=A , **A , ) _UpperCAmelCase : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , A ) != do_lower_case or normalizer_state.get("strip_accents" , A ) != strip_accents or normalizer_state.get("handle_chinese_chars" , A ) != tokenize_chinese_chars ): _UpperCAmelCase : Dict = getattr(A , normalizer_state.pop("type" ) ) _UpperCAmelCase : int = do_lower_case _UpperCAmelCase : Optional[int] = strip_accents _UpperCAmelCase : str = tokenize_chinese_chars _UpperCAmelCase : List[Any] = normalizer_class(**A ) _UpperCAmelCase : Dict = do_lower_case def _A ( self : List[Any] , A : Tuple , A : Any=None ): _UpperCAmelCase : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _A ( self : int , A : List[int] , A : Optional[List[int]] = None ): _UpperCAmelCase : Any = [self.sep_token_id] _UpperCAmelCase : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _A ( self : Dict , A : str , A : Optional[str] = None ): _UpperCAmelCase : Any = self._tokenizer.model.save(A , name=A ) return tuple(A )
31
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the largest product of thirteen adjacent digits in the given digit string."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
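# Hedged sanity checks, not in the source: on a run of identical digits every
# 13-digit window has the same product, so the answer is easy to verify by hand.
assert solution("1" * 13) == 1
assert solution("2" * 14) == 2**13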
94
0
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json", "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json", "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json", "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json", "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json", "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json", } class _SCREAMING_SNAKE_CASE( A ): SCREAMING_SNAKE_CASE_ : List[Any] = '''bloom''' SCREAMING_SNAKE_CASE_ : Dict = ['''past_key_values'''] SCREAMING_SNAKE_CASE_ : Optional[Any] = { '''num_hidden_layers''': '''n_layer''', '''num_attention_heads''': '''n_head''', } def __init__( self ,SCREAMING_SNAKE_CASE__=25_08_80 ,SCREAMING_SNAKE_CASE__=64 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=8 ,SCREAMING_SNAKE_CASE__=1E-5 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=1 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=1 ,SCREAMING_SNAKE_CASE__=False ,**SCREAMING_SNAKE_CASE__ ,) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE :Any = vocab_size # Backward compatibility with n_embed kwarg __SCREAMING_SNAKE_CASE :List[Any] = kwargs.pop('''n_embed''' ,SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :List[str] = hidden_size if n_embed is None else n_embed __SCREAMING_SNAKE_CASE :Optional[int] = n_layer __SCREAMING_SNAKE_CASE :int = n_head __SCREAMING_SNAKE_CASE :Optional[Any] = layer_norm_epsilon __SCREAMING_SNAKE_CASE :Dict = initializer_range __SCREAMING_SNAKE_CASE :int = use_cache __SCREAMING_SNAKE_CASE :Tuple = pretraining_tp __SCREAMING_SNAKE_CASE :str = apply_residual_connection_post_layernorm __SCREAMING_SNAKE_CASE :Dict = hidden_dropout __SCREAMING_SNAKE_CASE :str = attention_dropout __SCREAMING_SNAKE_CASE :str = bos_token_id __SCREAMING_SNAKE_CASE :Optional[Any] = eos_token_id __SCREAMING_SNAKE_CASE :Union[str, Any] = slow_but_exact super().__init__(bos_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ) class _SCREAMING_SNAKE_CASE( A ): SCREAMING_SNAKE_CASE_ : str = version.parse('''1.12''' ) def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = "default" ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = False ,) -> Any: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE__ ,task=SCREAMING_SNAKE_CASE__ ,patching_specs=SCREAMING_SNAKE_CASE__ ,use_past=SCREAMING_SNAKE_CASE__ ) if not getattr(self._config ,'''pad_token_id''' ,SCREAMING_SNAKE_CASE__ ): # TODO: how to do that better? __SCREAMING_SNAKE_CASE :Union[str, Any] = 0 @property def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" __SCREAMING_SNAKE_CASE :Optional[int] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. 
For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE__ ,direction='''inputs''' ,inverted_values_shape=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Dict = {0: '''batch''', 1: '''past_sequence + sequence'''} else: __SCREAMING_SNAKE_CASE :Any = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def _UpperCamelCase ( self ) -> int: """simple docstring""" return self._config.n_layer @property def _UpperCamelCase ( self ) -> int: """simple docstring""" return self._config.n_head @property def _UpperCamelCase ( self ) -> float: """simple docstring""" return 1E-3 def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = None ,) -> Mapping[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE :Dict = super(SCREAMING_SNAKE_CASE__ ,self ).generate_dummy_inputs( SCREAMING_SNAKE_CASE__ ,batch_size=SCREAMING_SNAKE_CASE__ ,seq_length=SCREAMING_SNAKE_CASE__ ,is_pair=SCREAMING_SNAKE_CASE__ ,framework=SCREAMING_SNAKE_CASE__ ) # We need to order the input in the way they appears in the forward() __SCREAMING_SNAKE_CASE :str = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Any = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values __SCREAMING_SNAKE_CASE :List[str] = seqlen + 2 __SCREAMING_SNAKE_CASE :Optional[int] = self._config.hidden_size // self.num_attention_heads __SCREAMING_SNAKE_CASE :Union[str, Any] = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) __SCREAMING_SNAKE_CASE :Dict = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) __SCREAMING_SNAKE_CASE :Tuple = [ (torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) for _ in range(self.num_layers ) ] __SCREAMING_SNAKE_CASE :List[Any] = common_inputs['''attention_mask'''] if self.use_past: __SCREAMING_SNAKE_CASE :List[Any] = ordered_inputs['''attention_mask'''].dtype __SCREAMING_SNAKE_CASE :str = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,dtype=SCREAMING_SNAKE_CASE__ )] ,dim=1 ) return ordered_inputs @property def _UpperCamelCase ( self ) -> int: """simple docstring""" return 13
239
"""simple docstring""" import math import unittest def __lowerCamelCase ( a_ : int ) -> bool: assert isinstance(a_ , a_ ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(a_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True class _SCREAMING_SNAKE_CASE( unittest.TestCase ): def _UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" self.assertTrue(is_prime(2 ) ) self.assertTrue(is_prime(3 ) ) self.assertTrue(is_prime(5 ) ) self.assertTrue(is_prime(7 ) ) self.assertTrue(is_prime(11 ) ) self.assertTrue(is_prime(13 ) ) self.assertTrue(is_prime(17 ) ) self.assertTrue(is_prime(19 ) ) self.assertTrue(is_prime(23 ) ) self.assertTrue(is_prime(29 ) ) def _UpperCamelCase ( self ) -> Tuple: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE__ ): is_prime(-19 ) self.assertFalse( is_prime(0 ) ,'''Zero doesn\'t have any positive factors, primes must have exactly two.''' ,) self.assertFalse( is_prime(1 ) ,'''One only has 1 positive factor, primes must have exactly two.''' ,) self.assertFalse(is_prime(2 * 2 ) ) self.assertFalse(is_prime(2 * 3 ) ) self.assertFalse(is_prime(3 * 3 ) ) self.assertFalse(is_prime(3 * 5 ) ) self.assertFalse(is_prime(3 * 5 * 7 ) ) if __name__ == "__main__": unittest.main()
239
1
import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() snake_case : int = logging.get_logger(__name__) def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ): """simple docstring""" a :Union[str, Any] = os.path.abspath(UpperCAmelCase_ ) logger.info(F'''Converting TensorFlow checkpoint from {tf_path}''' ) # Load weights from TF model a :Union[str, Any] = tf.train.list_variables(UpperCAmelCase_ ) a :Optional[Any] = [] a :List[str] = [] a :Optional[int] = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") a :Dict = full_name.split('''/''' ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(F'''Skipping non-model layer {full_name}''' ) continue if "optimizer" in full_name: logger.info(F'''Skipping optimization layer {full_name}''' ) continue if name[0] == "model": # ignore initial 'model' a :Any = name[1:] # figure out how many levels deep the name is a :Union[str, Any] = 0 for _name in name: if _name.startswith('''layer_with_weights''' ): depth += 1 else: break layer_depth.append(UpperCAmelCase_ ) # read data a :int = tf.train.load_variable(UpperCAmelCase_ , UpperCAmelCase_ ) names.append('''/'''.join(UpperCAmelCase_ ) ) arrays.append(UpperCAmelCase_ ) logger.info(F'''Read a total of {len(UpperCAmelCase_ ):,} layers''' ) # Sanity check if len(set(UpperCAmelCase_ ) ) != 1: raise ValueError(F'''Found layer names with different depths (layer depth {list(set(UpperCAmelCase_ ) )})''' ) a :str = list(set(UpperCAmelCase_ ) )[0] if layer_depth != 1: raise ValueError( '''The model contains more than just the embedding/encoder layers. 
This script does not handle MLM/NSP''' ''' heads.''' ) # convert layers logger.info('''Converting weights...''' ) for full_name, array in zip(UpperCAmelCase_ , UpperCAmelCase_ ): a :int = full_name.split('''/''' ) a :Dict = model a :int = [] for i, m_name in enumerate(UpperCAmelCase_ ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith('''layer_with_weights''' ): a :List[str] = int(m_name.split('''-''' )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(['''embeddings''', '''LayerNorm'''] ) a :Dict = getattr(UpperCAmelCase_ , '''embeddings''' ) a :Tuple = getattr(UpperCAmelCase_ , '''LayerNorm''' ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] ) a :Optional[int] = getattr(UpperCAmelCase_ , '''encoder''' ) a :List[Any] = getattr(UpperCAmelCase_ , '''layer''' ) a :int = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(['''pooler''', '''dense'''] ) a :List[Any] = getattr(UpperCAmelCase_ , '''pooler''' ) a :str = getattr(UpperCAmelCase_ , '''dense''' ) elif m_name == "embeddings": trace.append('''embeddings''' ) a :int = getattr(UpperCAmelCase_ , '''embeddings''' ) if layer_num == 0: trace.append('''word_embeddings''' ) a :Union[str, Any] = getattr(UpperCAmelCase_ , '''word_embeddings''' ) elif layer_num == 1: trace.append('''position_embeddings''' ) a :Any = getattr(UpperCAmelCase_ , '''position_embeddings''' ) elif layer_num == 2: trace.append('''token_type_embeddings''' ) a :Any = getattr(UpperCAmelCase_ , '''token_type_embeddings''' ) else: raise ValueError(F'''Unknown embedding layer with name {full_name}''' ) trace.append('''weight''' ) a :List[str] = getattr(UpperCAmelCase_ , '''weight''' ) elif m_name == "_attention_layer": # self-attention layer trace.extend(['''attention''', '''self'''] ) a :str = getattr(UpperCAmelCase_ , '''attention''' ) a :Any = getattr(UpperCAmelCase_ , '''self''' ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(['''attention''', '''output''', '''LayerNorm'''] ) a :int = getattr(UpperCAmelCase_ , '''attention''' ) a :int = getattr(UpperCAmelCase_ , '''output''' ) a :str = getattr(UpperCAmelCase_ , '''LayerNorm''' ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(['''attention''', '''output''', '''dense'''] ) a :int = getattr(UpperCAmelCase_ , '''attention''' ) a :int = getattr(UpperCAmelCase_ , '''output''' ) a :Any = getattr(UpperCAmelCase_ , '''dense''' ) elif m_name == "_output_dense": # output dense trace.extend(['''output''', '''dense'''] ) a :List[Any] = getattr(UpperCAmelCase_ , '''output''' ) a :int = getattr(UpperCAmelCase_ , '''dense''' ) elif m_name == "_output_layer_norm": # output dense trace.extend(['''output''', '''LayerNorm'''] ) a :str = getattr(UpperCAmelCase_ , '''output''' ) a :int = getattr(UpperCAmelCase_ , '''LayerNorm''' ) elif m_name == "_key_dense": # attention key trace.append('''key''' ) a :List[str] = getattr(UpperCAmelCase_ , '''key''' ) elif m_name == "_query_dense": # attention query trace.append('''query''' ) a :Optional[Any] = getattr(UpperCAmelCase_ , '''query''' ) elif m_name == "_value_dense": # attention value trace.append('''value''' ) a :Any = getattr(UpperCAmelCase_ , '''value''' ) elif m_name == 
"_intermediate_dense": # attention intermediate dense trace.extend(['''intermediate''', '''dense'''] ) a :Union[str, Any] = getattr(UpperCAmelCase_ , '''intermediate''' ) a :List[Any] = getattr(UpperCAmelCase_ , '''dense''' ) elif m_name == "_output_layer_norm": # output layer norm trace.append('''output''' ) a :List[str] = getattr(UpperCAmelCase_ , '''output''' ) # weights & biases elif m_name in ["bias", "beta"]: trace.append('''bias''' ) a :Optional[int] = getattr(UpperCAmelCase_ , '''bias''' ) elif m_name in ["kernel", "gamma"]: trace.append('''weight''' ) a :Any = getattr(UpperCAmelCase_ , '''weight''' ) else: logger.warning(F'''Ignored {m_name}''' ) # for certain layers reshape is necessary a :int = '''.'''.join(UpperCAmelCase_ ) if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , UpperCAmelCase_ ) or re.match( R'''(\S+)\.attention\.output\.dense\.weight''' , UpperCAmelCase_ ): a :List[str] = array.reshape(pointer.data.shape ) if "kernel" in full_name: a :int = array.transpose() if pointer.shape == array.shape: a :Dict = torch.from_numpy(UpperCAmelCase_ ) else: raise ValueError( F'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:''' F''' {array.shape}''' ) logger.info(F'''Successfully set variable {full_name} to PyTorch layer {trace}''' ) return model def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] ): """simple docstring""" logger.info(F'''Loading model based on config from {config_path}...''' ) a :Dict = BertConfig.from_json_file(UpperCAmelCase_ ) a :Optional[int] = BertModel(UpperCAmelCase_ ) # Load weights from checkpoint logger.info(F'''Loading weights from checkpoint {tf_checkpoint_path}...''' ) load_tfa_weights_in_bert(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Save pytorch-model logger.info(F'''Saving PyTorch model to {pytorch_dump_path}...''' ) torch.save(model.state_dict() , UpperCAmelCase_ ) if __name__ == "__main__": snake_case : List[Any] = argparse.ArgumentParser() parser.add_argument( '''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.''' ) parser.add_argument( '''--bert_config_file''', type=str, required=True, help='''The config json file corresponding to the BERT model. This specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', type=str, required=True, help='''Path to the output PyTorch model (must include filename).''', ) snake_case : Union[str, Any] = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
94
import random


def _partition(data: list, pivot) -> tuple:
    # Three-way partition of the data relative to the pivot
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
201
0
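A minimal usage sketch for the quick-select sample above (identifiers de-mangled earlier in this row); the expected outputs follow from the algorithm's contract of returning the index-th smallest element, and only the sample list is invented:

items = [2, 4, 5, 7, 899, 54, 32]
print(quick_select(items, 0))                # 2    (smallest element)
print(quick_select(items, len(items) // 2))  # 7    (index 3 of the sorted list)
print(quick_select(items, 99))               # None (out-of-range index)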
'''simple docstring''' import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py _UpperCamelCase = '''src/diffusers''' # Matches is_xxx_available() _UpperCamelCase = re.compile(r'''is\_([a-z_]*)_available\(\)''') # Matches from xxx import bla _UpperCamelCase = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') _UpperCamelCase = ''' {0} = None ''' _UpperCamelCase = ''' class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, {1}) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, {1}) ''' _UpperCamelCase = ''' def {0}(*args, **kwargs): requires_backends({0}, {1}) ''' def lowercase_ ( lowerCAmelCase__ : Dict ): """simple docstring""" __UpperCAmelCase : Optional[int] = _re_backend.findall(lowerCAmelCase__ ) if len(lowerCAmelCase__ ) == 0: return None return "_and_".join(lowerCAmelCase__ ) def lowercase_ ( ): """simple docstring""" with open(os.path.join(lowerCAmelCase__ , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: __UpperCAmelCase : str = f.readlines() # Get to the point we do the actual imports for type checking __UpperCAmelCase : Optional[Any] = 0 __UpperCAmelCase : int = {} # Go through the end of the file while line_index < len(lowerCAmelCase__ ): # If the line contains is_backend_available, we grab all objects associated with the `else` block __UpperCAmelCase : List[Any] = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("""else:""" ): line_index += 1 line_index += 1 __UpperCAmelCase : Optional[Any] = [] # Until we unindent, add backend objects to the list while line_index < len(lowerCAmelCase__ ) and len(lines[line_index] ) > 1: __UpperCAmelCase : Optional[int] = lines[line_index] __UpperCAmelCase : Optional[int] = _re_single_line_import.search(lowerCAmelCase__ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(lowerCAmelCase__ ) > 0: __UpperCAmelCase : List[Any] = objects else: line_index += 1 return backend_specific_objects def lowercase_ ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str ): """simple docstring""" if name.isupper(): return DUMMY_CONSTANT.format(lowerCAmelCase__ ) elif name.islower(): return DUMMY_FUNCTION.format(lowerCAmelCase__ , lowerCAmelCase__ ) else: return DUMMY_CLASS.format(lowerCAmelCase__ , lowerCAmelCase__ ) def lowercase_ ( lowerCAmelCase__ : Optional[int]=None ): """simple docstring""" if backend_specific_objects is None: __UpperCAmelCase : List[Any] = read_init() # For special correspondence backend to module name as used in the function requires_modulename __UpperCAmelCase : str = {} for backend, objects in backend_specific_objects.items(): __UpperCAmelCase : List[str] = """[""" + """, """.join(f'"{b}"' for b in backend.split("""_and_""" ) ) + """]""" __UpperCAmelCase : Dict = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n""" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(lowerCAmelCase__ , lowerCAmelCase__ ) for o in objects] ) __UpperCAmelCase : Optional[Any] = dummy_file return dummy_files def lowercase_ ( lowerCAmelCase__ : 
Optional[int]=False ): """simple docstring""" __UpperCAmelCase : List[Any] = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py __UpperCAmelCase : Optional[int] = {"""torch""": """pt"""} # Locate actual dummy modules and read their content. __UpperCAmelCase : str = os.path.join(lowerCAmelCase__ , """utils""" ) __UpperCAmelCase : Dict = { backend: os.path.join(lowerCAmelCase__ , f'dummy_{short_names.get(lowerCAmelCase__ , lowerCAmelCase__ )}_objects.py' ) for backend in dummy_files.keys() } __UpperCAmelCase : Tuple = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(lowerCAmelCase__ ): with open(lowerCAmelCase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: __UpperCAmelCase : List[str] = f.read() else: __UpperCAmelCase : List[Any] = """""" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f'Updating diffusers.utils.dummy_{short_names.get(lowerCAmelCase__ , lowerCAmelCase__ )}_objects.py as the main ' """__init__ has new objects.""" ) with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( """The main __init__ has objects that are not present in """ f'diffusers.utils.dummy_{short_names.get(lowerCAmelCase__ , lowerCAmelCase__ )}_objects.py. Run `make fix-copies` ' """to fix this.""" ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') _UpperCamelCase = parser.parse_args() check_dummies(args.fix_and_overwrite)
16
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {'''vocab_file''': '''vocab.txt'''} _UpperCamelCase = { '''vocab_file''': { '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''', '''YituTech/conv-bert-medium-small''': ( '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt''' ), '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''', } } _UpperCamelCase = { '''YituTech/conv-bert-base''': 512, '''YituTech/conv-bert-medium-small''': 512, '''YituTech/conv-bert-small''': 512, } _UpperCamelCase = { '''YituTech/conv-bert-base''': {'''do_lower_case''': True}, '''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True}, '''YituTech/conv-bert-small''': {'''do_lower_case''': True}, } class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_INIT_CONFIGURATION _SCREAMING_SNAKE_CASE : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE : List[Any] = ConvBertTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]: '''simple docstring''' super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , ) __UpperCAmelCase : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , __UpperCAmelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" , __UpperCAmelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , __UpperCAmelCase ) != tokenize_chinese_chars ): __UpperCAmelCase : Dict = getattr(__UpperCAmelCase , normalizer_state.pop("""type""" ) ) __UpperCAmelCase : Union[str, Any] = do_lower_case __UpperCAmelCase : str = strip_accents __UpperCAmelCase : Union[str, Any] = tokenize_chinese_chars __UpperCAmelCase : List[Any] = normalizer_class(**__UpperCAmelCase ) __UpperCAmelCase : List[Any] = do_lower_case def __A ( self , __UpperCAmelCase , __UpperCAmelCase=None ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = [self.sep_token_id] __UpperCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: '''simple docstring''' __UpperCAmelCase : 
Union[str, Any] = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase ) return tuple(__UpperCAmelCase )
16
1
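The check_dummies script in this row builds placeholder objects from the three templates it defines; a short sketch of what its create_dummy_object returns for each name shape (the object names below are arbitrary examples, not taken from the file):

print(create_dummy_object("ONNX_WEIGHTS_NAME", '["torch"]'))  # all-caps  -> DUMMY_CONSTANT
print(create_dummy_object("load_image", '["torch"]'))         # lowercase -> DUMMY_FUNCTION
print(create_dummy_object("UNet2DModel", '["torch"]'))        # CamelCase -> DUMMY_CLASS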
"""simple docstring""" from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
60
"""simple docstring""" import copy import random from transformers import CLIPTokenizer class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __init__(self , *_lowerCamelCase , **_lowerCamelCase ): """simple docstring""" super().__init__(*_lowerCamelCase , **_lowerCamelCase ) UpperCAmelCase__ : str = {} def _a (self , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = super().add_tokens(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) if num_added_tokens == 0: raise ValueError( F"""The tokenizer already contains the token {placeholder_token}. Please pass a different""" """ `placeholder_token` that is not already in the tokenizer.""" ) def _a (self , _lowerCamelCase , *_lowerCamelCase , _lowerCamelCase=1 , **_lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : int = [] if num_vec_per_token == 1: self.try_adding_tokens(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) output.append(_lowerCamelCase ) else: UpperCAmelCase__ : Any = [] for i in range(_lowerCamelCase ): UpperCAmelCase__ : Optional[int] = placeholder_token + F"""_{i}""" self.try_adding_tokens(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) output.append(_lowerCamelCase ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( F"""The tokenizer already has placeholder token {token} that can get confused with""" F""" {placeholder_token}keep placeholder tokens independent""" ) UpperCAmelCase__ : Dict = output def _a (self , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=1.0 ): """simple docstring""" if isinstance(_lowerCamelCase , _lowerCamelCase ): UpperCAmelCase__ : str = [] for i in range(len(_lowerCamelCase ) ): output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=_lowerCamelCase ) ) return output for placeholder_token in self.token_map: if placeholder_token in text: UpperCAmelCase__ : List[str] = self.token_map[placeholder_token] UpperCAmelCase__ : Any = tokens[: 1 + int(len(_lowerCamelCase ) * prop_tokens_to_load )] if vector_shuffle: UpperCAmelCase__ : Any = copy.copy(_lowerCamelCase ) random.shuffle(_lowerCamelCase ) UpperCAmelCase__ : Optional[Any] = text.replace(_lowerCamelCase , """ """.join(_lowerCamelCase ) ) return text def __call__(self , _lowerCamelCase , *_lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=1.0 , **_lowerCamelCase ): """simple docstring""" return super().__call__( self.replace_placeholder_tokens_in_text( _lowerCamelCase , vector_shuffle=_lowerCamelCase , prop_tokens_to_load=_lowerCamelCase ) , *_lowerCamelCase , **_lowerCamelCase , ) def _a (self , _lowerCamelCase , *_lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=1.0 , **_lowerCamelCase ): """simple docstring""" return super().encode( self.replace_placeholder_tokens_in_text( _lowerCamelCase , vector_shuffle=_lowerCamelCase , prop_tokens_to_load=_lowerCamelCase ) , *_lowerCamelCase , **_lowerCamelCase , )
171
0
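The tokenizer subclass in this row still has mangled identifiers; assuming it is the multi-vector placeholder tokenizer from the diffusers textual-inversion example, a rough usage sketch would look like the following (class name, method name, and checkpoint are all assumptions, not taken from this file):

tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")  # assumed class name
tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)  # assumed method; adds <cat-toy>_0 ... <cat-toy>_3
enc = tokenizer("a photo of <cat-toy>", vector_shuffle=True)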
"""simple docstring""" # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def __UpperCamelCase (*_SCREAMING_SNAKE_CASE ) -> str: with open(_SCREAMING_SNAKE_CASE , 'r' ) as fh: fcntl.flock(_SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*_SCREAMING_SNAKE_CASE ) finally: fcntl.flock(_SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) lowercase_ = int(os.environ["""LOCAL_RANK"""]) torch.cuda.set_device(local_rank) lowercase_ = torch.device("""cuda""", local_rank) lowercase_ = socket.gethostname() lowercase_ = f'''[{hostname}-{local_rank}]''' try: # test distributed dist.init_process_group("""nccl""") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank lowercase_ = dist.get_rank() lowercase_ = dist.get_world_size() printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''') dist.barrier() if rank == 0: printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''') except Exception: printflock(f'''{gpu} is broken''') raise
350
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    def __init__(self) -> None:
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    def __init__(self) -> None:
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
269
0
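A quick sanity check of the error paths in the priority-queue classes above (names as de-mangled):

pq = FixedPriorityQueue()
try:
    pq.dequeue()
except UnderFlowError as err:
    print(err)  # All queues are empty
try:
    pq.enqueue(5, 1)  # only priorities 0, 1, and 2 exist
except ValueError as err:
    print(err)  # Valid priorities are 0, 1, and 2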
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase__ = "▁" lowercase__ = {"vocab_file": "spiece.model"} lowercase__ = { "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"} } lowercase__ = { "google/pegasus-xsum": 512, } lowercase__ = logging.get_logger(__name__) class A_ ( _UpperCAmelCase ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = VOCAB_FILES_NAMES UpperCAmelCase_ : Optional[int] = VOCAB_FILES_NAMES UpperCAmelCase_ : Any = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ : Tuple = ["""input_ids""", """attention_mask"""] def __init__( self : Dict , lowercase_ : Dict , lowercase_ : Optional[Any]="<pad>" , lowercase_ : Dict="</s>" , lowercase_ : str="<unk>" , lowercase_ : List[Any]="<mask_2>" , lowercase_ : List[str]="<mask_1>" , lowercase_ : int=None , lowercase_ : Optional[int]=103 , lowercase_ : int = None , **lowercase_ : List[Any] , ) -> None: UpperCAmelCase : Dict = offset if additional_special_tokens is not None: if not isinstance(lowercase_ , lowercase_ ): raise TypeError( f"""additional_special_tokens should be of type {type(lowercase_ )}, but is""" f""" {type(lowercase_ )}""" ) UpperCAmelCase : int = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f"""<unk_{i}>""" for i in range(len(lowercase_ ) , self.offset - 1 ) ] if len(set(lowercase_ ) ) != len(lowercase_ ): raise ValueError( 'Please make sure that the provided additional_special_tokens do not contain an incorrectly' f""" shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.""" ) UpperCAmelCase : Tuple = additional_special_tokens_extended else: UpperCAmelCase : List[Any] = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )] UpperCAmelCase : Any = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=lowercase_ , unk_token=lowercase_ , mask_token=lowercase_ , pad_token=lowercase_ , mask_token_sent=lowercase_ , offset=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , ) UpperCAmelCase : Union[str, Any] = mask_token_sent UpperCAmelCase : List[Any] = vocab_file UpperCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowercase_ ) # add special tokens to encoder dict UpperCAmelCase : List[str] = { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, } ) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} ) UpperCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()} @property def UpperCAmelCase_ ( self : int ) -> int: return len(self.sp_model ) + self.offset def UpperCAmelCase_ ( self : Any ) -> Dict[str, int]: UpperCAmelCase : int = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ) -> Optional[int]: UpperCAmelCase : Optional[int] = self.__dict__.copy() UpperCAmelCase : int = None return state def __setstate__( self : Union[str, Any] , lowercase_ : Dict ) -> List[Any]: UpperCAmelCase : Tuple = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): UpperCAmelCase : List[Any] = {} UpperCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCAmelCase_ ( self : int , lowercase_ : Dict ) -> List[str]: return self.sp_model.encode(lowercase_ , out_type=lowercase_ ) def UpperCAmelCase_ ( self : List[str] , lowercase_ : List[str] ) -> int: if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] UpperCAmelCase : Any = self.sp_model.piece_to_id(lowercase_ ) return sp_id + self.offset def UpperCAmelCase_ ( self : Any , lowercase_ : Any ) -> str: if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: UpperCAmelCase : Union[str, Any] = self.sp_model.IdToPiece(index - self.offset ) return token def UpperCAmelCase_ ( self : Tuple , lowercase_ : List[str] ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = [] UpperCAmelCase : List[str] = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowercase_ ) + token UpperCAmelCase : Dict = [] else: current_sub_tokens.append(lowercase_ ) out_string += self.sp_model.decode(lowercase_ ) return out_string.strip() def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : List[Any]=False ) -> Tuple: return 1 def UpperCAmelCase_ ( self : List[Any] , lowercase_ : Tuple ) -> Tuple: UpperCAmelCase : Tuple = 
set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def UpperCAmelCase_ ( self : int , lowercase_ : Optional[Any] , lowercase_ : List[str] = None , lowercase_ : Union[str, Any] = False ) -> List[int]: if already_has_special_tokens: return self._special_token_mask(lowercase_ ) elif token_ids_a is None: return self._special_token_mask(lowercase_ ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def UpperCAmelCase_ ( self : List[str] , lowercase_ : str , lowercase_ : List[Any]=None ) -> List[int]: if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def UpperCAmelCase_ ( self : int , lowercase_ : Dict , lowercase_ : Any = None ) -> Tuple[str]: if not os.path.isdir(lowercase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCAmelCase : Optional[Any] = os.path.join( lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowercase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowercase_ , 'wb' ) as fi: UpperCAmelCase : Dict = self.sp_model.serialized_model_proto() fi.write(lowercase_ ) return (out_vocab_file,)
151
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available SCREAMING_SNAKE_CASE__ = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ["BartphoTokenizer"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
46
0
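Identifiers in the SentencePiece tokenizer of this row are mangled; judging by its checkpoint map it is the Pegasus tokenizer, so here is a hedged usage sketch (class name assumed, network access required):

tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
ids = tok("The quick brown fox.").input_ids  # ends with eos_token_id, per build_inputs_with_special_tokens above
print(tok.decode(ids))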
import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class UpperCamelCase : @staticmethod def __A ( *UpperCAmelCase__ , **UpperCAmelCase__ ): pass def UpperCamelCase ( _A : Image )-> str: """simple docstring""" A__ = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class UpperCamelCase ( unittest.TestCase ): lowerCAmelCase : List[str] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): A__ = DepthEstimationPipeline(model=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ ): A__ = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" ) self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , UpperCAmelCase__ ) import datasets A__ = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) A__ = depth_estimator( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] ) self.assertEqual( [ {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, ] , UpperCAmelCase__ , ) @require_tf @unittest.skip("Depth estimation is not implemented in TF" ) def __A ( self ): pass @slow @require_torch def __A ( self ): A__ = "Intel/dpt-large" A__ = pipeline("depth-estimation" , model=UpperCAmelCase__ ) A__ = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" ) A__ = hashimage(outputs["depth"] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 ) @require_torch def __A ( self ): # This is highly irregular to have no small tests. self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
198
from manim import * class UpperCamelCase ( _UpperCAmelCase ): def __A ( self ): A__ = Rectangle(height=0.5 , width=0.5 ) A__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) A__ = [mem.copy() for i in range(6 )] A__ = [mem.copy() for i in range(6 )] A__ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 ) A__ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 ) A__ = VGroup(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 ) A__ = Text("CPU" , font_size=24 ) A__ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(UpperCAmelCase__ ) A__ = [mem.copy() for i in range(4 )] A__ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 ) A__ = Text("GPU" , font_size=24 ) A__ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ ) gpu.move_to([-1, -1, 0] ) self.add(UpperCAmelCase__ ) A__ = [mem.copy() for i in range(6 )] A__ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 ) A__ = Text("Model" , font_size=24 ) A__ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ ) model.move_to([3, -1.0, 0] ) self.add(UpperCAmelCase__ ) A__ = [] for i, rect in enumerate(UpperCAmelCase__ ): rect.set_stroke(UpperCAmelCase__ ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) A__ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase__ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCAmelCase__ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=UpperCAmelCase__ , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=UpperCAmelCase__ , buff=0.0 ) self.add(UpperCAmelCase__ ) cpu_targs.append(UpperCAmelCase__ ) A__ = [mem.copy() for i in range(6 )] A__ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 ) A__ = Text("Loaded Checkpoint" , font_size=24 ) A__ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , aligned_edge=UpperCAmelCase__ , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) A__ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) A__ = MarkupText( F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(UpperCAmelCase__ , UpperCAmelCase__ ) A__ = MarkupText( F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(UpperCAmelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) A__ = MarkupText( F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(UpperCAmelCase__ ) , Write(UpperCAmelCase__ ) ) self.play(Write(UpperCAmelCase__ , run_time=1 ) , Create(UpperCAmelCase__ , run_time=1 ) ) A__ = [] A__ = [] for i, rect in enumerate(UpperCAmelCase__ ): A__ = fill.copy().set_fill(UpperCAmelCase__ , opacity=0.7 ) target.move_to(UpperCAmelCase__ ) first_animations.append(GrowFromCenter(UpperCAmelCase__ , run_time=1 ) ) A__ = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) 
second_animations.append(MoveToTarget(UpperCAmelCase__ , run_time=1.5 ) ) self.play(*UpperCAmelCase__ ) self.play(*UpperCAmelCase__ ) self.wait()
198
1
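The slow test in this row pins down the public behaviour of the depth-estimation pipeline; a stand-alone sketch of the same call (network access and the Intel/dpt-large weights are required):

from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
print(out["predicted_depth"].shape)  # torch.Tensor of per-pixel depth
out["depth"].save("depth.png")       # PIL image visualisation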
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging _lowercase : str = logging.get_logger(__name__) _lowercase : Optional[Any] = { "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", } class __magic_name__ ( _UpperCAmelCase): UpperCamelCase__ = '''codegen''' UpperCamelCase__ = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , lowercase_ : List[str]=50400 , lowercase_ : Dict=2048 , lowercase_ : Union[str, Any]=2048 , lowercase_ : Dict=4096 , lowercase_ : Optional[int]=28 , lowercase_ : Any=16 , lowercase_ : Tuple=64 , lowercase_ : Tuple=None , lowercase_ : List[str]="gelu_new" , lowercase_ : List[Any]=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : List[str]=1E-5 , lowercase_ : Optional[int]=0.02 , lowercase_ : Optional[int]=True , lowercase_ : Any=50256 , lowercase_ : Optional[int]=50256 , lowercase_ : str=False , **lowercase_ : str , ): lowercase_ : Optional[int] = vocab_size lowercase_ : Tuple = n_ctx lowercase_ : int = n_positions lowercase_ : Union[str, Any] = n_embd lowercase_ : Optional[int] = n_layer lowercase_ : Optional[Any] = n_head lowercase_ : Optional[Any] = n_inner lowercase_ : Tuple = rotary_dim lowercase_ : int = activation_function lowercase_ : List[Any] = resid_pdrop lowercase_ : List[str] = embd_pdrop lowercase_ : Union[str, Any] = attn_pdrop lowercase_ : List[str] = layer_norm_epsilon lowercase_ : Optional[int] = initializer_range lowercase_ : int = use_cache lowercase_ : Any = bos_token_id lowercase_ : int = eos_token_id super().__init__( bos_token_id=lowercase_ , eos_token_id=lowercase_ , tie_word_embeddings=lowercase_ , **lowercase_ ) class __magic_name__ ( _UpperCAmelCase): def __init__( self : Union[str, Any] , lowercase_ : PretrainedConfig , lowercase_ : str = "default" , lowercase_ : List[PatchingSpec] = None , lowercase_ : bool = False , ): super().__init__(lowercase_ , task=lowercase_ , 
patching_specs=lowercase_ , use_past=lowercase_ ) if not getattr(self._config , """pad_token_id""" , lowercase_ ): # TODO: how to do that better? lowercase_ : Dict = 0 @property def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Optional[Any] = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(lowercase_ , direction="""inputs""" ) lowercase_ : Union[str, Any] = {0: """batch""", 1: """past_sequence + sequence"""} else: lowercase_ : str = {0: """batch""", 1: """sequence"""} return common_inputs @property def SCREAMING_SNAKE_CASE_ ( self : Dict ): return self._config.n_layer @property def SCREAMING_SNAKE_CASE_ ( self : Tuple ): return self._config.n_head def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ): lowercase_ : Dict = super(lowercase_ , self ).generate_dummy_inputs( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ ) # We need to order the input in the way they appears in the forward() lowercase_ : List[Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowercase_ , lowercase_ : Dict = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values lowercase_ : Optional[int] = seqlen + 2 lowercase_ : str = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowercase_ : Tuple = [ (torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(self.num_layers ) ] lowercase_ : str = common_inputs["""attention_mask"""] if self.use_past: lowercase_ : str = ordered_inputs["""attention_mask"""].dtype lowercase_ : List[str] = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(lowercase_ , lowercase_ , dtype=lowercase_ )] , dim=1 ) return ordered_inputs @property def SCREAMING_SNAKE_CASE_ ( self : int ): return 13
239
import operator as op


def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731  integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
239
1
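A worked example for the postfix evaluator above: the expression "2 3 4 * +" encodes 2 + (3 * 4) = 14:

result = solve("2 3 4 * +".split(" "))  # also prints the step-by-step trace table
print(result)  # 14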
import os
import sys


SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
350
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file  # noqa: F401
from .default import default_command_parser
from .update import update_command_parser


def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
46
0
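A small sketch of driving the config parser above programmatically; whether a bare invocation sets args.func depends on config_command_parser, which is not shown in this row, so the guard below mirrors main() rather than asserting an outcome:

parser = get_config_parser()
args = parser.parse_args([])
if not hasattr(args, "func"):  # same fallback as main()
    parser.print_help()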
"""simple docstring""" import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py lowerCAmelCase_ = 'src/diffusers' # Matches is_xxx_available() lowerCAmelCase_ = re.compile(R'is\_([a-z_]*)_available\(\)') # Matches from xxx import bla lowerCAmelCase_ = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') lowerCAmelCase_ = '\n{0} = None\n' lowerCAmelCase_ = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n' lowerCAmelCase_ = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' def __UpperCAmelCase ( __lowerCamelCase ) -> List[Any]: lowercase__ : str = _re_backend.findall(__lowerCamelCase ) if len(__lowerCamelCase ) == 0: return None return "_and_".join(__lowerCamelCase ) def __UpperCAmelCase ( ) -> int: with open(os.path.join(__lowerCamelCase , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowercase__ : List[Any] = f.readlines() # Get to the point we do the actual imports for type checking lowercase__ : str = 0 lowercase__ : str = {} # Go through the end of the file while line_index < len(__lowerCamelCase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block lowercase__ : Any = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith('''else:''' ): line_index += 1 line_index += 1 lowercase__ : Tuple = [] # Until we unindent, add backend objects to the list while line_index < len(__lowerCamelCase ) and len(lines[line_index] ) > 1: lowercase__ : List[Any] = lines[line_index] lowercase__ : Any = _re_single_line_import.search(__lowerCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(__lowerCamelCase ) > 0: lowercase__ : List[Any] = objects else: line_index += 1 return backend_specific_objects def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]: if name.isupper(): return DUMMY_CONSTANT.format(__lowerCamelCase ) elif name.islower(): return DUMMY_FUNCTION.format(__lowerCamelCase , __lowerCamelCase ) else: return DUMMY_CLASS.format(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( __lowerCamelCase=None ) -> Tuple: if backend_specific_objects is None: lowercase__ : List[str] = read_init() # For special correspondence backend to module name as used in the function requires_modulename lowercase__ : Union[str, Any] = {} for backend, objects in backend_specific_objects.items(): lowercase__ : List[Any] = '''[''' + ''', '''.join(f"""\"{b}\"""" for b in backend.split('''_and_''' ) ) + ''']''' lowercase__ : str = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n''' dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(__lowerCamelCase , __lowerCamelCase ) for o in objects] ) lowercase__ : str = dummy_file return dummy_files def __UpperCAmelCase ( __lowerCamelCase=False ) -> Optional[Any]: lowercase__ : Any = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py 
lowercase__ : Any = {'''torch''': '''pt'''} # Locate actual dummy modules and read their content. lowercase__ : int = os.path.join(__lowerCamelCase , '''utils''' ) lowercase__ : List[Any] = { backend: os.path.join(__lowerCamelCase , f"""dummy_{short_names.get(__lowerCamelCase , __lowerCamelCase )}_objects.py""" ) for backend in dummy_files.keys() } lowercase__ : Tuple = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(__lowerCamelCase ): with open(__lowerCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowercase__ : List[Any] = f.read() else: lowercase__ : Any = '''''' for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f"""Updating diffusers.utils.dummy_{short_names.get(__lowerCamelCase , __lowerCamelCase )}_objects.py as the main """ '''__init__ has new objects.''' ) with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(dummy_files[backend] ) else: raise ValueError( '''The main __init__ has objects that are not present in ''' f"""diffusers.utils.dummy_{short_names.get(__lowerCamelCase , __lowerCamelCase )}_objects.py. Run `make fix-copies` """ '''to fix this.''' ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') lowerCAmelCase_ = parser.parse_args() check_dummies(args.fix_and_overwrite)
16
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor lowerCAmelCase_ = logging.get_logger(__name__) class __A ( A_ ): '''simple docstring''' def __init__( self : Dict ,*_snake_case : Any ,**_snake_case : str ) -> None: """simple docstring""" warnings.warn( '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use MobileViTImageProcessor instead.''' ,_snake_case ,) super().__init__(*_snake_case ,**_snake_case )
16
1
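Because MobileViTFeatureExtractor above only forwards to MobileViTImageProcessor after warning, the deprecation can be observed directly (a sketch; default construction is assumed to be valid):

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    MobileViTFeatureExtractor()
print(caught[0].category.__name__)  # FutureWarning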
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging A_ = logging.get_logger(__name__) A_ = { "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json", "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json", "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json", "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json", "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json", "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json", } class __SCREAMING_SNAKE_CASE ( UpperCamelCase ): snake_case_ = '''bloom''' snake_case_ = ['''past_key_values'''] snake_case_ = { '''num_hidden_layers''': '''n_layer''', '''num_attention_heads''': '''n_head''', } def __init__( self : Optional[Any] , snake_case : Dict=25_0880 , snake_case : str=64 , snake_case : List[str]=2 , snake_case : Tuple=8 , snake_case : Any=1e-5 , snake_case : List[str]=0.02 , snake_case : Any=True , snake_case : Dict=1 , snake_case : List[Any]=2 , snake_case : Dict=False , snake_case : Tuple=0.0 , snake_case : List[str]=0.0 , snake_case : List[Any]=1 , snake_case : Any=False , **snake_case : List[Any] , ): '''simple docstring''' A__ : Dict = vocab_size # Backward compatibility with n_embed kwarg A__ : str = kwargs.pop("""n_embed""" , _snake_case ) A__ : List[Any] = hidden_size if n_embed is None else n_embed A__ : Optional[Any] = n_layer A__ : List[Any] = n_head A__ : Optional[int] = layer_norm_epsilon A__ : Union[str, Any] = initializer_range A__ : Union[str, Any] = use_cache A__ : List[Any] = pretraining_tp A__ : Tuple = apply_residual_connection_post_layernorm A__ : Optional[Any] = hidden_dropout A__ : Tuple = attention_dropout A__ : Optional[int] = bos_token_id A__ : Union[str, Any] = eos_token_id A__ : str = slow_but_exact super().__init__(bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case ) class __SCREAMING_SNAKE_CASE ( UpperCamelCase ): snake_case_ = version.parse('1.12' ) def __init__( self : int , snake_case : PretrainedConfig , snake_case : str = "default" , snake_case : List[PatchingSpec] = None , snake_case : bool = False , ): '''simple docstring''' super().__init__(_snake_case , task=_snake_case , patching_specs=_snake_case , use_past=_snake_case ) if not getattr(self._config , """pad_token_id""" , _snake_case ): # TODO: how to do that better? A__ : Optional[Any] = 0 @property def _UpperCamelCase ( self : Union[str, Any] ): '''simple docstring''' A__ : List[Any] = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. 
For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(_snake_case , direction="""inputs""" , inverted_values_shape=_snake_case ) A__ : Optional[int] = {0: """batch""", 1: """past_sequence + sequence"""} else: A__ : List[str] = {0: """batch""", 1: """sequence"""} return common_inputs @property def _UpperCamelCase ( self : List[Any] ): '''simple docstring''' return self._config.n_layer @property def _UpperCamelCase ( self : Optional[int] ): '''simple docstring''' return self._config.n_head @property def _UpperCamelCase ( self : Any ): '''simple docstring''' return 1e-3 def _UpperCamelCase ( self : Dict , snake_case : "PreTrainedTokenizer" , snake_case : int = -1 , snake_case : int = -1 , snake_case : bool = False , snake_case : Optional["TensorType"] = None , ): '''simple docstring''' A__ : Any = super(_snake_case , self ).generate_dummy_inputs( _snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case ) # We need to order the input in the way they appears in the forward() A__ : Optional[int] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch A__ , A__ : List[Any] = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values A__ : str = seqlen + 2 A__ : Optional[int] = self._config.hidden_size // self.num_attention_heads A__ : List[str] = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) A__ : Union[str, Any] = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) A__ : List[str] = [ (torch.zeros(_snake_case ), torch.zeros(_snake_case )) for _ in range(self.num_layers ) ] A__ : Tuple = common_inputs["""attention_mask"""] if self.use_past: A__ : Tuple = ordered_inputs["""attention_mask"""].dtype A__ : Any = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(_snake_case , _snake_case , dtype=_snake_case )] , dim=1 ) return ordered_inputs @property def _UpperCamelCase ( self : int ): '''simple docstring''' return 13
357
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class __SCREAMING_SNAKE_CASE ( UpperCamelCase ): snake_case_ = 'Salesforce/blip-image-captioning-base' snake_case_ = ( 'This is a tool that generates a description of an image. It takes an input named `image` which should be the ' 'image to caption, and returns a text that contains the description in English.' ) snake_case_ = 'image_captioner' snake_case_ = AutoModelForVisionaSeq snake_case_ = ['image'] snake_case_ = ['text'] def __init__( self : int , *snake_case : Optional[int] , **snake_case : Optional[int] ): '''simple docstring''' requires_backends(self , ["""vision"""] ) super().__init__(*snake_case , **snake_case ) def _UpperCamelCase ( self : int , snake_case : "Image" ): '''simple docstring''' return self.pre_processor(images=snake_case , return_tensors="""pt""" ) def _UpperCamelCase ( self : int , snake_case : List[Any] ): '''simple docstring''' return self.model.generate(**snake_case ) def _UpperCamelCase ( self : Optional[int] , snake_case : Any ): '''simple docstring''' return self.pre_processor.batch_decode(snake_case , skip_special_tokens=snake_case )[0].strip()
296
0
'''simple docstring'''
import datasets
from sklearn.metrics import f1_score


_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'

_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted labels.\n    references (`list` of `int`): Ground truth labels.\n    labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n        - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n        - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n        - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n        - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n        - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n    sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n    Example 1-A simple binary example\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n        >>> print(results)\n        {\'f1\': 0.5}\n\n    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n        >>> print(round(results[\'f1\'], 2))\n        0.67\n\n    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n        >>> print(round(results[\'f1\'], 2))\n        0.35\n\n    Example 4-A multiclass example, with different values for the `average` input.\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n        >>> print(round(results[\'f1\'], 2))\n        0.33\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'f1\': array([0.8, 0. , 0. ])}\n'

_CITATION = '\n@article{scikit-learn,\n    title={Scikit-learn: Machine Learning in {P}ython},\n    author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n        and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n        and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n        Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n    journal={Journal of Machine Learning Research},\n    volume={12},\n    pages={2825--2830},\n    year={2011}\n}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    """simple docstring"""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('int32')),
                    'references': datasets.Sequence(datasets.Value('int32')),
                }
                if self.config_name == 'multilabel'
                else {
                    'predictions': datasets.Value('int32'),
                    'references': datasets.Value('int32'),
                }
            ),
            reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
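# Usage sketch (assumes local `datasets` and `scikit-learn` installs; the
# "multilabel" config name is passed as load_metric's config argument and
# switches the feature schema to label sequences, per _info above):
import datasets

f1_metric = datasets.load_metric("f1", "multilabel")
results = f1_metric.compute(
    references=[[0, 1, 1], [1, 0, 0]],
    predictions=[[0, 1, 0], [1, 0, 1]],
    average="micro",
)
print(results)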
185
"""simple docstring""" def _lowercase ( __snake_case ,__snake_case ) -> float: if digit_amount > 0: return round(number - int(__snake_case ) ,__snake_case ) return number - int(__snake_case ) if __name__ == "__main__": print(decimal_isolate(1.53, 0)) print(decimal_isolate(35.345, 1)) print(decimal_isolate(35.345, 2)) print(decimal_isolate(35.345, 3)) print(decimal_isolate(-14.789, 3)) print(decimal_isolate(0, 2)) print(decimal_isolate(-14.123, 1)) print(decimal_isolate(-14.123, 2)) print(decimal_isolate(-14.123, 3))
269
0
import numpy as np
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
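# Usage sketch (illustrative only: a randomly initialized checker, so the
# detections are meaningless; real pipelines load pretrained weights):
from transformers import CLIPImageProcessor

config = CLIPConfig()
checker = IFSafetyChecker(config)
processor = CLIPImageProcessor()

images = [np.random.rand(64, 64, 3).astype("float32") for _ in range(2)]
clip_input = processor(images=images, return_tensors="pt").pixel_values
images, nsfw, watermark = checker(clip_input, images)
print(nsfw, watermark)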
284
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # mean-pool the token embeddings, weighted by the attention mask
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
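# Usage sketch (illustrative: a randomly initialized model; vocab_size is set
# to match the xlm-roberta-base tokenizer, which assumes hub access):
from transformers import AutoTokenizer

config = MCLIPConfig(transformerDimSize=768, imageDimSize=512, vocab_size=250002)
model = MultilingualCLIP(config)
tok = AutoTokenizer.from_pretrained("xlm-roberta-base")
batch = tok(["a photo of a cat"], return_tensors="pt")
text_embs, token_embs = model(batch["input_ids"], batch["attention_mask"])
print(text_embs.shape)  # torch.Size([1, 512])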
284
1
'''simple docstring'''


def reverse_words(input_str: str) -> str:
    """
    Reverses words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    >>> reverse_words("I     Love          Python")
    'Python Love I'
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
198
'''simple docstring'''
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because PyTorch Lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device('cpu'))
    lightning_model.load_state_dict(ckpt['state_dict'])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--longformer_model""",
        default=None,
        type=str,
        required=True,
        help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
    )
    parser.add_argument(
        """--longformer_question_answering_ckpt_path""",
        default=None,
        type=str,
        required=True,
        help="""Path the official PyTorch Lightning Checkpoint.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
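# Usage sketch (all paths below are placeholders): the conversion can also be
# driven from Python instead of the CLI shown above.
convert_longformer_qa_checkpoint_to_pytorch(
    "allenai/longformer-base-4096",
    "path/to/lightning_checkpoint.ckpt",
    "path/to/output_dir",
)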
198
1
from ..utils import DummyObject, requires_backends


# NOTE: the scrambled source reused a single name for all four dummy classes,
# so each redefinition shadowed the previous one; distinct placeholder names
# are used here (the original class names could not be recovered).
class FlaxTransformersDummyObjectA(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxTransformersDummyObjectB(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxTransformersDummyObjectC(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxTransformersDummyObjectD(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
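# Behaviour sketch: when the flax/transformers backends are missing,
# instantiating a dummy raises an ImportError naming the required packages.
try:
    FlaxTransformersDummyObjectA()
except ImportError as err:
    print(err)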
361
from __future__ import annotations

from decimal import Decimal

from numpy import array


def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    """simple docstring"""
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]

    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
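# Usage sketch: invert a 2x2 matrix with determinant 1 and verify by hand;
# the inverse of [[2, 5], [1, 3]] is [[3, -5], [-1, 2]].
print(inverse_of_matrix([[2.0, 5.0], [1.0, 3.0]]))  # [[3.0, -5.0], [-1.0, 2.0]]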
137
0
"""simple docstring""" import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def lowercase (SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple ) -> List[str]: SCREAMING_SNAKE_CASE = old_name if "patch_embed" in old_name: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = old_name.split('.' ) if layer == "0": SCREAMING_SNAKE_CASE = old_name.replace('0' , 'convolution1' ) elif layer == "1": SCREAMING_SNAKE_CASE = old_name.replace('1' , 'batchnorm_before' ) elif layer == "3": SCREAMING_SNAKE_CASE = old_name.replace('3' , 'convolution2' ) else: SCREAMING_SNAKE_CASE = old_name.replace('4' , 'batchnorm_after' ) if "network" in old_name and re.search(R'\d\.\d' , SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE = R'\b\d{2}\b' if bool(re.search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ): SCREAMING_SNAKE_CASE = re.search(R'\d\.\d\d.' , SCREAMING_SNAKE_CASE_ ).group() else: SCREAMING_SNAKE_CASE = re.search(R'\d\.\d.' , SCREAMING_SNAKE_CASE_ ).group() if int(match[0] ) < 6: SCREAMING_SNAKE_CASE = old_name.replace(SCREAMING_SNAKE_CASE_ , '' ) SCREAMING_SNAKE_CASE = trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] ) SCREAMING_SNAKE_CASE = 'intermediate_stages.' + trimmed_name else: SCREAMING_SNAKE_CASE = old_name.replace(SCREAMING_SNAKE_CASE_ , '' ) if int(match[2] ) < num_meta4D_last_stage: SCREAMING_SNAKE_CASE = trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] ) else: SCREAMING_SNAKE_CASE = str(int(match[2] ) - num_meta4D_last_stage ) SCREAMING_SNAKE_CASE = trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index ) if "norm1" in old_name: SCREAMING_SNAKE_CASE = trimmed_name.replace('norm1' , 'layernorm1' ) elif "norm2" in old_name: SCREAMING_SNAKE_CASE = trimmed_name.replace('norm2' , 'layernorm2' ) elif "fc1" in old_name: SCREAMING_SNAKE_CASE = trimmed_name.replace('fc1' , 'linear_in' ) elif "fc2" in old_name: SCREAMING_SNAKE_CASE = trimmed_name.replace('fc2' , 'linear_out' ) SCREAMING_SNAKE_CASE = 'last_stage.' + trimmed_name elif "network" in old_name and re.search(R'.\d.' , SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE = old_name.replace('network' , 'intermediate_stages' ) if "fc" in new_name: SCREAMING_SNAKE_CASE = new_name.replace('fc' , 'convolution' ) elif ("norm1" in new_name) and ("layernorm1" not in new_name): SCREAMING_SNAKE_CASE = new_name.replace('norm1' , 'batchnorm_before' ) elif ("norm2" in new_name) and ("layernorm2" not in new_name): SCREAMING_SNAKE_CASE = new_name.replace('norm2' , 'batchnorm_after' ) if "proj" in new_name: SCREAMING_SNAKE_CASE = new_name.replace('proj' , 'projection' ) if "dist_head" in new_name: SCREAMING_SNAKE_CASE = new_name.replace('dist_head' , 'distillation_classifier' ) elif "head" in new_name: SCREAMING_SNAKE_CASE = new_name.replace('head' , 'classifier' ) elif "patch_embed" in new_name: SCREAMING_SNAKE_CASE = 'efficientformer.' + new_name elif new_name == "norm.weight" or new_name == "norm.bias": SCREAMING_SNAKE_CASE = new_name.replace('norm' , 'layernorm' ) SCREAMING_SNAKE_CASE = 'efficientformer.' + new_name else: SCREAMING_SNAKE_CASE = 'efficientformer.encoder.' 
+ new_name return new_name def lowercase (SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Union[str, Any]: for key in checkpoint.copy().keys(): SCREAMING_SNAKE_CASE = checkpoint.pop(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = val return checkpoint def lowercase () -> str: SCREAMING_SNAKE_CASE = 'http://images.cocodataset.org/val2017/000000039769.jpg' SCREAMING_SNAKE_CASE = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return image def lowercase (SCREAMING_SNAKE_CASE_ : Path , SCREAMING_SNAKE_CASE_ : Path , SCREAMING_SNAKE_CASE_ : Path , SCREAMING_SNAKE_CASE_ : bool ) -> Optional[int]: SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location='cpu' )['model'] SCREAMING_SNAKE_CASE = EfficientFormerConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = EfficientFormerForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = '_'.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] ) SCREAMING_SNAKE_CASE = config.depths[-1] - config.num_metaad_blocks + 1 SCREAMING_SNAKE_CASE = convert_torch_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) model.eval() SCREAMING_SNAKE_CASE = { 'bilinear': PILImageResampling.BILINEAR, 'bicubic': PILImageResampling.BICUBIC, 'nearest': PILImageResampling.NEAREST, } # prepare image SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = 2_56 SCREAMING_SNAKE_CASE = 2_24 SCREAMING_SNAKE_CASE = EfficientFormerImageProcessor( size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , ) SCREAMING_SNAKE_CASE = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values # original processing pipeline SCREAMING_SNAKE_CASE = Compose( [ Resize(SCREAMING_SNAKE_CASE_ , interpolation=pillow_resamplings['bicubic'] ), CenterCrop(SCREAMING_SNAKE_CASE_ ), ToTensor(), Normalize(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), ] ) SCREAMING_SNAKE_CASE = image_transforms(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ) assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = model(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = outputs.logits SCREAMING_SNAKE_CASE = (1, 10_00) if "l1" in model_name: SCREAMING_SNAKE_CASE = torch.Tensor( [-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28] ) assert torch.allclose(logits[0, :10] , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) assert logits.shape == expected_shape elif "l3" in model_name: SCREAMING_SNAKE_CASE = torch.Tensor( [-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27] ) assert torch.allclose(logits[0, :10] , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) assert logits.shape == expected_shape elif "l7" in model_name: SCREAMING_SNAKE_CASE = torch.Tensor( [-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78] ) assert logits.shape == expected_shape else: raise ValueError( F'Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7' ) # Save Checkpoints Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(F'Checkpoint successfuly converted. 
Model saved at {pytorch_dump_path}' ) processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(F'Processor successfuly saved at {pytorch_dump_path}' ) if push_to_hub: print('Pushing model to the hub...' ) model.push_to_hub( repo_id=F'Bearnardd/{pytorch_dump_path}' , commit_message='Add model' , use_temp_dir=SCREAMING_SNAKE_CASE_ , ) processor.push_to_hub( repo_id=F'Bearnardd/{pytorch_dump_path}' , commit_message='Add image processor' , use_temp_dir=SCREAMING_SNAKE_CASE_ , ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to EfficientFormer pytorch checkpoint.''', ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The json file for EfficientFormer model config.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') parser.add_argument( '''--no-push_to_hub''', dest='''push_to_hub''', action='''store_false''', help='''Do not push model and image processor to the hub''', ) parser.set_defaults(push_to_hub=True) __UpperCamelCase = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
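# Usage sketch ("path/to/output_dir" is a placeholder): after conversion, the
# checkpoint loads back through the standard from_pretrained API.
from transformers import EfficientFormerForImageClassificationWithTeacher

model = EfficientFormerForImageClassificationWithTeacher.from_pretrained("path/to/output_dir")
model.eval()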
113
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FocalNetForImageClassification", "FocalNetForMaskedImageModeling", "FocalNetBackbone", "FocalNetModel", "FocalNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
46
0
import os
import unittest

from huggingface_hub.utils import are_progress_bars_disabled

import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar


class LoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + '\n')

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, '')

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + '\n')

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY='error')
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        _ = logging.get_logger('transformers.models.bart.tokenization_bart')

        env_level_str = os.getenv('TRANSFORMERS_VERBOSITY', None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            F'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''',
        )

        # restore to the original level
        os.environ['TRANSFORMERS_VERBOSITY'] = ''
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY='super-error')
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger('transformers.models.bart.tokenization_bart')
        self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error', cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1'):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, '')

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=''):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + '\n')


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
200
from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = """https://api.github.com"""

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + """/user"""

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("""USER_TOKEN""", """""")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """
    Fetch GitHub info of a user using the requests module.
    """
    headers = {
        'Authorization': F'''token {auth_token}''',
        'Accept': 'application/vnd.github.v3+json',
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(F'''{key}: {value}''')
    else:
        raise ValueError("""'USER_TOKEN' field cannot be empty.""")
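# Usage sketch ("ghp_..." below is a placeholder, not a real credential): the
# helper can be called directly instead of reading the USER_TOKEN env var;
# with an invalid token, GitHub returns an error document and "login" is absent.
user = fetch_github_info("ghp_xxxxxxxxxxxxxxxxxxxx")
print(user.get("login"))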
200
1