code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
def snake_case_ ( snake_case , snake_case ) -> str: if number < 0 or shift_amount < 0: raise ValueError('both inputs must be positive integers' ) lowercase__: str = str(bin(snake_case ) ) binary_number += "0" * shift_amount return binary_number def snake_case_ ( snake_case , snake_case ) -> str: if number < 0 or shift_amount < 0: raise ValueError('both inputs must be positive integers' ) lowercase__: Optional[Any] = str(bin(snake_case ) )[2:] if shift_amount >= len(snake_case ): return "0b0" lowercase__: Optional[int] = binary_number[: len(snake_case ) - shift_amount] return "0b" + shifted_binary_number def snake_case_ ( snake_case , snake_case ) -> str: if number >= 0: # Get binary representation of positive number lowercase__: Union[str, Any] = '0' + str(bin(snake_case ) ).strip('-' )[2:] else: # Get binary (2's complement) representation of negative number lowercase__: Dict = len(bin(snake_case )[3:] ) # Find 2's complement of number lowercase__: int = bin(abs(snake_case ) - (1 << binary_number_length) )[3:] lowercase__: Any = ( '1' + '0' * (binary_number_length - len(snake_case )) + binary_number ) if shift_amount >= len(snake_case ): return "0b" + binary_number[0] * len(snake_case ) return ( "0b" + binary_number[0] * shift_amount + binary_number[: len(snake_case ) - shift_amount] ) if __name__ == "__main__": import doctest doctest.testmod()
196
__lowerCAmelCase = range(2, 20 + 1) __lowerCAmelCase = [10**k for k in range(ks[-1] + 1)] __lowerCAmelCase = {} def snake_case_ ( snake_case , snake_case , snake_case , snake_case ) -> Optional[int]: lowercase__: str = sum(a_i[j] for j in range(snake_case , len(snake_case ) ) ) lowercase__: Optional[int] = sum(a_i[j] * base[j] for j in range(min(len(snake_case ) , snake_case ) ) ) lowercase__ , lowercase__: str = 0, 0 lowercase__: Tuple = n - i lowercase__: Dict = memo.get(snake_case ) if sub_memo is not None: lowercase__: Optional[Any] = sub_memo.get(snake_case ) if jumps is not None and len(snake_case ) > 0: # find and make the largest jump without going over lowercase__: int = -1 for _k in range(len(snake_case ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: lowercase__: Union[str, Any] = _k break if max_jump >= 0: lowercase__ , lowercase__ , lowercase__: Any = jumps[max_jump] # since the difference between jumps is cached, add c lowercase__: str = diff + c for j in range(min(snake_case , len(snake_case ) ) ): lowercase__ , lowercase__: Dict = divmod(snake_case , 10 ) if new_c > 0: add(snake_case , snake_case , snake_case ) else: lowercase__: List[Any] = [] else: lowercase__: Optional[Any] = {c: []} lowercase__: Union[str, Any] = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps lowercase__ , lowercase__: Union[str, Any] = next_term(snake_case , k - 1 , i + dn , snake_case ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead lowercase__ , lowercase__: Dict = compute(snake_case , snake_case , i + dn , snake_case ) diff += _diff dn += terms_jumped lowercase__: Any = sub_memo[c] # keep jumps sorted by # of terms skipped lowercase__: str = 0 while j < len(snake_case ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(snake_case , (diff, dn, k) ) return (diff, dn) def snake_case_ ( snake_case , snake_case , snake_case , snake_case ) -> str: if i >= n: return 0, i if k > len(snake_case ): a_i.extend([0 for _ in range(k - len(snake_case ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) lowercase__: List[Any] = i lowercase__ , lowercase__ , lowercase__: Any = 0, 0, 0 for j in range(len(snake_case ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 lowercase__: str = ds_c + ds_b diff += addend lowercase__: List[str] = 0 for j in range(snake_case ): lowercase__: Any = a_i[j] + addend lowercase__ , lowercase__: List[Any] = divmod(snake_case , 10 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(snake_case , snake_case , snake_case ) return diff, i - start_i def snake_case_ ( snake_case , snake_case , snake_case ) -> int: for j in range(snake_case , len(snake_case ) ): lowercase__: str = digits[j] + addend if s >= 10: lowercase__ , lowercase__: Any = divmod(snake_case , 10 ) lowercase__: Any = addend // 10 + quotient else: lowercase__: Union[str, Any] = s lowercase__: Union[str, Any] = addend // 10 if addend == 0: break while addend > 0: lowercase__ , lowercase__: Union[str, Any] = divmod(snake_case , 10 ) digits.append(snake_case ) def snake_case_ ( snake_case = 10**15 ) -> int: lowercase__: Optional[Any] = [1] lowercase__: int = 1 lowercase__: Tuple = 0 while True: lowercase__ , lowercase__: str = next_term(snake_case , 20 , i + dn , snake_case ) dn += terms_jumped if dn == n - i: break lowercase__: Dict = 0 for j in range(len(snake_case ) ): a_n += digits[j] * 10**j return a_n if __name__ == "__main__": print(F'''{solution() = }''')
196
1
"""simple docstring""" import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class __lowerCamelCase ( TensorFormatter[Mapping, """torch.Tensor""", Mapping] ): '''simple docstring''' def __init__( self : int , a_ : Optional[Any]=None , **a_ : Dict ): super().__init__(features=_UpperCAmelCase ) lowerCAmelCase_ : str = torch_tensor_kwargs import torch # noqa import torch at initialization def lowerCamelCase ( self : Any , a_ : Tuple ): import torch if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and column: if all( isinstance(_UpperCAmelCase , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(_UpperCAmelCase ) return column def lowerCamelCase ( self : List[str] , a_ : str ): import torch if isinstance(_UpperCAmelCase , (str, bytes, type(_UpperCAmelCase )) ): return value elif isinstance(_UpperCAmelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() lowerCAmelCase_ : Tuple = {} if isinstance(_UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): lowerCAmelCase_ : Union[str, Any] = {'dtype': torch.intaa} elif isinstance(_UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): lowerCAmelCase_ : str = {'dtype': torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(_UpperCAmelCase , PIL.Image.Image ): lowerCAmelCase_ : str = np.asarray(_UpperCAmelCase ) return torch.tensor(_UpperCAmelCase , **{**default_dtype, **self.torch_tensor_kwargs} ) def lowerCamelCase ( self : Union[str, Any] , a_ : Union[str, Any] ): import torch # support for torch, tf, jax etc. if hasattr(_UpperCAmelCase , "__array__" ) and not isinstance(_UpperCAmelCase , torch.Tensor ): lowerCAmelCase_ : Dict = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(_UpperCAmelCase , np.ndarray ): if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(_UpperCAmelCase ) for substruct in data_struct] ) elif isinstance(_UpperCAmelCase , (list, tuple) ): return self._consolidate([self.recursive_tensorize(_UpperCAmelCase ) for substruct in data_struct] ) return self._tensorize(_UpperCAmelCase ) def lowerCamelCase ( self : Optional[int] , a_ : dict ): return map_nested(self._recursive_tensorize , _UpperCAmelCase , map_list=_UpperCAmelCase ) def lowerCamelCase ( self : List[str] , a_ : pa.Table ): lowerCAmelCase_ : Dict = self.numpy_arrow_extractor().extract_row(_UpperCAmelCase ) lowerCAmelCase_ : Dict = self.python_features_decoder.decode_row(_UpperCAmelCase ) return self.recursive_tensorize(_UpperCAmelCase ) def lowerCamelCase ( self : Optional[int] , a_ : pa.Table ): lowerCAmelCase_ : int = self.numpy_arrow_extractor().extract_column(_UpperCAmelCase ) lowerCAmelCase_ : Optional[int] = self.python_features_decoder.decode_column(_UpperCAmelCase , pa_table.column_names[0] ) lowerCAmelCase_ : str = self.recursive_tensorize(_UpperCAmelCase ) lowerCAmelCase_ : int = self._consolidate(_UpperCAmelCase ) return column def lowerCamelCase ( self : str , a_ : pa.Table ): lowerCAmelCase_ : Optional[Any] = self.numpy_arrow_extractor().extract_batch(_UpperCAmelCase ) lowerCAmelCase_ : List[str] = self.python_features_decoder.decode_batch(_UpperCAmelCase ) lowerCAmelCase_ : List[str] = self.recursive_tensorize(_UpperCAmelCase ) for column_name in batch: lowerCAmelCase_ : Union[str, Any] = self._consolidate(batch[column_name] ) return batch
369
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { """SCUT-DLVCLab/lilt-roberta-en-base""": ( """https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json""" ), } class __lowerCamelCase ( A__ ): '''simple docstring''' a_ : List[Any] = """lilt""" def __init__( self : Any , a_ : List[str]=3_05_22 , a_ : List[Any]=7_68 , a_ : Tuple=12 , a_ : Tuple=12 , a_ : str=30_72 , a_ : Union[str, Any]="gelu" , a_ : Union[str, Any]=0.1 , a_ : List[Any]=0.1 , a_ : List[Any]=5_12 , a_ : List[str]=2 , a_ : int=0.02 , a_ : Optional[int]=1e-1_2 , a_ : Any=0 , a_ : str="absolute" , a_ : List[Any]=None , a_ : Optional[int]=4 , a_ : str=10_24 , **a_ : Union[str, Any] , ): super().__init__(pad_token_id=a_ , **a_ ) lowerCAmelCase_ : List[str] = vocab_size lowerCAmelCase_ : List[str] = hidden_size lowerCAmelCase_ : int = num_hidden_layers lowerCAmelCase_ : Any = num_attention_heads lowerCAmelCase_ : str = hidden_act lowerCAmelCase_ : str = intermediate_size lowerCAmelCase_ : List[str] = hidden_dropout_prob lowerCAmelCase_ : Any = attention_probs_dropout_prob lowerCAmelCase_ : int = max_position_embeddings lowerCAmelCase_ : Any = type_vocab_size lowerCAmelCase_ : List[Any] = initializer_range lowerCAmelCase_ : str = layer_norm_eps lowerCAmelCase_ : Tuple = position_embedding_type lowerCAmelCase_ : Union[str, Any] = classifier_dropout lowerCAmelCase_ : Optional[Any] = channel_shrink_ratio lowerCAmelCase_ : Dict = max_ad_position_embeddings
161
0
"""simple docstring""" import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = BertJapaneseTokenizer SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = True def a_ ( self) -> str: super().setUp() snake_case_ = [ '[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは', '世界', '##世界', '、', '##、', '。', '##。', ] snake_case_ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file']) with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens])) def a_ ( self, lowerCAmelCase__) -> Union[str, Any]: snake_case_ = 'こんにちは、世界。 \nこんばんは、世界。' snake_case_ = 'こんにちは 、 世界 。 こんばんは 、 世界 。' return input_text, output_text def a_ ( self, lowerCAmelCase__) -> Optional[Any]: snake_case_ , snake_case_ = self.get_input_output_texts(lowerCAmelCase__) snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) snake_case_ = tokenizer.decode(lowerCAmelCase__, clean_up_tokenization_spaces=lowerCAmelCase__) return text, ids def a_ ( self) -> Dict: pass # TODO add if relevant def a_ ( self) -> Optional[Any]: pass # TODO add if relevant def a_ ( self) -> Dict: pass # TODO add if relevant def a_ ( self) -> Union[str, Any]: snake_case_ = self.tokenizer_class(self.vocab_file) snake_case_ = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。') self.assertListEqual(lowerCAmelCase__, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。']) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__), [3, 12, 10, 14, 4, 9, 12, 10, 14]) def a_ ( self) -> str: snake_case_ = self.tokenizer_class(self.vocab_file, word_tokenizer_type='mecab') self.assertIsNotNone(lowerCAmelCase__) snake_case_ = 'こんにちは、世界。\nこんばんは、世界。' snake_case_ = tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。']) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__), [3, 12, 10, 14, 4, 9, 12, 10, 14]) snake_case_ = os.path.join(self.tmpdirname, 'tokenizer.bin') with open(lowerCAmelCase__, 'wb') as handle: pickle.dump(lowerCAmelCase__, lowerCAmelCase__) with open(lowerCAmelCase__, 'rb') as handle: snake_case_ = pickle.load(lowerCAmelCase__) snake_case_ = tokenizer_new.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Dict: snake_case_ = MecabTokenizer(mecab_dic='ipadic') self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '), ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'], ) def a_ ( self) -> Any: try: snake_case_ = MecabTokenizer(mecab_dic='unidic_lite') except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '), ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'], ) def a_ ( self) -> List[str]: try: snake_case_ = MecabTokenizer(mecab_dic='unidic') except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '), ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'], ) def a_ ( self) -> Any: snake_case_ = MecabTokenizer(do_lower_case=lowerCAmelCase__, mecab_dic='ipadic') self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '), ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'], ) def a_ ( self) -> List[str]: try: snake_case_ = MecabTokenizer( do_lower_case=lowerCAmelCase__, normalize_text=lowerCAmelCase__, mecab_option='-d /usr/local/lib/mecab/dic/jumandic') except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '), ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'], ) def a_ ( self) -> Optional[Any]: snake_case_ = MecabTokenizer(normalize_text=lowerCAmelCase__, mecab_dic='ipadic') self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '), ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'], ) @require_sudachi def a_ ( self) -> Optional[Any]: snake_case_ = self.tokenizer_class(self.vocab_file, word_tokenizer_type='sudachi') self.assertIsNotNone(lowerCAmelCase__) snake_case_ = 'こんにちは、世界。\nこんばんは、世界。' snake_case_ = tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。']) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__), [3, 12, 10, 14, 4, 9, 12, 10, 14]) snake_case_ = os.path.join(self.tmpdirname, 'tokenizer.bin') with open(lowerCAmelCase__, 'wb') as handle: pickle.dump(lowerCAmelCase__, lowerCAmelCase__) with open(lowerCAmelCase__, 'rb') as handle: snake_case_ = pickle.load(lowerCAmelCase__) snake_case_ = tokenizer_new.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) @require_sudachi def a_ ( self) -> Optional[int]: snake_case_ = SudachiTokenizer(sudachi_dict_type='core') self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '), [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '], ) @require_sudachi def a_ ( self) -> List[Any]: snake_case_ = SudachiTokenizer(sudachi_dict_type='core', sudachi_split_mode='A') self.assertListEqual(tokenizer.tokenize('外国人参政権'), ['外国', '人', '参政', '権']) @require_sudachi def a_ ( self) -> int: snake_case_ = SudachiTokenizer(sudachi_dict_type='core', sudachi_split_mode='B') self.assertListEqual(tokenizer.tokenize('外国人参政権'), ['外国人', '参政権']) @require_sudachi def a_ ( self) -> Dict: snake_case_ = SudachiTokenizer(sudachi_dict_type='core', sudachi_split_mode='C') self.assertListEqual(tokenizer.tokenize('外国人参政権'), ['外国人参政権']) @require_sudachi def a_ ( self) -> Any: snake_case_ = SudachiTokenizer(do_lower_case=lowerCAmelCase__, sudachi_dict_type='core') self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '), [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '], ) @require_sudachi def a_ ( self) -> Dict: snake_case_ = SudachiTokenizer(normalize_text=lowerCAmelCase__, sudachi_dict_type='core') self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '), [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '], ) @require_sudachi def a_ ( self) -> Optional[int]: snake_case_ = SudachiTokenizer(trim_whitespace=lowerCAmelCase__, sudachi_dict_type='core') self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '), ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'], ) @require_jumanpp def a_ ( self) -> Optional[int]: snake_case_ = self.tokenizer_class(self.vocab_file, word_tokenizer_type='jumanpp') self.assertIsNotNone(lowerCAmelCase__) snake_case_ = 'こんにちは、世界。\nこんばんは、世界。' snake_case_ = tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。']) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__), [3, 12, 10, 14, 4, 9, 12, 10, 14]) snake_case_ = os.path.join(self.tmpdirname, 'tokenizer.bin') with open(lowerCAmelCase__, 'wb') as handle: pickle.dump(lowerCAmelCase__, lowerCAmelCase__) with open(lowerCAmelCase__, 'rb') as handle: snake_case_ = pickle.load(lowerCAmelCase__) snake_case_ = tokenizer_new.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) @require_jumanpp def a_ ( self) -> List[Any]: snake_case_ = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '), ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'], ) @require_jumanpp def a_ ( self) -> List[Any]: snake_case_ = JumanppTokenizer(do_lower_case=lowerCAmelCase__) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '), ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'], ) @require_jumanpp def a_ ( self) -> List[str]: snake_case_ = JumanppTokenizer(normalize_text=lowerCAmelCase__) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '), ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'], ) @require_jumanpp def a_ ( self) -> Optional[int]: snake_case_ = JumanppTokenizer(trim_whitespace=lowerCAmelCase__) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '), ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'], ) @require_jumanpp def a_ ( self) -> Tuple: snake_case_ = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。'), ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'], ) def a_ ( self) -> Optional[int]: snake_case_ = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは'] snake_case_ = {} for i, token in enumerate(lowerCAmelCase__): snake_case_ = i snake_case_ = WordpieceTokenizer(vocab=lowerCAmelCase__, unk_token='[UNK]') self.assertListEqual(tokenizer.tokenize(''), []) self.assertListEqual(tokenizer.tokenize('こんにちは'), ['こんにちは']) self.assertListEqual(tokenizer.tokenize('こんばんは'), ['こん', '##ばんは']) self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは'), ['こん', '##ばんは', '[UNK]', 'こんにちは']) def a_ ( self) -> List[str]: snake_case_ = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp') snake_case_ = tokenizer.subword_tokenizer snake_case_ = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。') self.assertListEqual(lowerCAmelCase__, ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。']) snake_case_ = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは') self.assertListEqual(lowerCAmelCase__, ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは']) def a_ ( self) -> Optional[Any]: snake_case_ = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese') snake_case_ = tokenizer.encode('ありがとう。', add_special_tokens=lowerCAmelCase__) snake_case_ = tokenizer.encode('どういたしまして。', add_special_tokens=lowerCAmelCase__) snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__) snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__, lowerCAmelCase__) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = BertJapaneseTokenizer SCREAMING_SNAKE_CASE_ = False def a_ ( self) -> Union[str, Any]: super().setUp() snake_case_ = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。'] snake_case_ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file']) with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens])) def a_ ( self, **lowerCAmelCase__) -> Dict: return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type='character', **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__) -> List[Any]: snake_case_ = 'こんにちは、世界。 \nこんばんは、世界。' snake_case_ = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。' return input_text, output_text def a_ ( self) -> Union[str, Any]: pass # TODO add if relevant def a_ ( self) -> List[str]: pass # TODO add if relevant def a_ ( self) -> Dict: pass # TODO add if relevant def a_ ( self) -> Dict: snake_case_ = self.tokenizer_class(self.vocab_file, subword_tokenizer_type='character') snake_case_ = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。') self.assertListEqual( lowerCAmelCase__, ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。']) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase__), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]) def a_ ( self) -> Any: snake_case_ = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。'] snake_case_ = {} for i, token in enumerate(lowerCAmelCase__): snake_case_ = i snake_case_ = CharacterTokenizer(vocab=lowerCAmelCase__, unk_token='[UNK]') self.assertListEqual(tokenizer.tokenize(''), []) self.assertListEqual(tokenizer.tokenize('こんにちは'), ['こ', 'ん', 'に', 'ち', 'は']) self.assertListEqual(tokenizer.tokenize('こんにちほ'), ['こ', 'ん', 'に', 'ち', '[UNK]']) def a_ ( self) -> str: snake_case_ = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char') snake_case_ = tokenizer.encode('ありがとう。', add_special_tokens=lowerCAmelCase__) snake_case_ = tokenizer.encode('どういたしまして。', add_special_tokens=lowerCAmelCase__) snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__) snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__, lowerCAmelCase__) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class UpperCamelCase ( unittest.TestCase ): def a_ ( self) -> str: snake_case_ = 'cl-tohoku/bert-base-japanese' snake_case_ = AutoTokenizer.from_pretrained(lowerCAmelCase__) self.assertIsInstance(lowerCAmelCase__, lowerCAmelCase__) class UpperCamelCase ( unittest.TestCase ): def a_ ( self) -> List[Any]: snake_case_ = 'cl-tohoku/bert-base-japanese' with self.assertLogs('transformers', level='WARNING') as cm: BertTokenizer.from_pretrained(lowerCAmelCase__) self.assertTrue( cm.records[0].message.startswith( 'The tokenizer class you load from this checkpoint is not the same type as the class this function' ' is called from.')) snake_case_ = 'bert-base-cased' with self.assertLogs('transformers', level='WARNING') as cm: BertJapaneseTokenizer.from_pretrained(lowerCAmelCase__) self.assertTrue( cm.records[0].message.startswith( 'The tokenizer class you load from this checkpoint is not the same type as the class this function' ' is called from.'))
69
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed _SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F'''{bindir}/../../examples/pytorch/translation'''): from run_translation import main # noqa set_seed(42) _SCREAMING_SNAKE_CASE = 'sshleifer/student_marian_en_ro_6_1' _SCREAMING_SNAKE_CASE = 'sshleifer/tiny-mbart' @require_torch class a ( __lowerCAmelCase ): """simple docstring""" def UpperCAmelCase ( self , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , ) -> int: _A = self.run_trainer( eval_steps=1 , max_len=12 , model_name=lowerCAmelCase_ , num_train_epochs=1 , distributed=lowerCAmelCase_ , extra_args_str=lowerCAmelCase_ , predict_with_generate=lowerCAmelCase_ , do_train=lowerCAmelCase_ , do_eval=lowerCAmelCase_ , do_predict=lowerCAmelCase_ , ) _A = TrainerState.load_from_json(os.path.join(lowerCAmelCase_ , """trainer_state.json""" ) ).log_history if not do_eval: return _A = [log for log in logs if """eval_loss""" in log.keys()] _A = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats _A = eval_metrics[-1] assert isinstance(last_step_stats["""eval_bleu"""] , lowerCAmelCase_ ) assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def UpperCAmelCase ( self ) -> Optional[int]: self.run_seqaseq_quick() @require_torch_multi_gpu def UpperCAmelCase ( self ) -> Dict: self.run_seqaseq_quick(distributed=lowerCAmelCase_ ) @require_torch_multi_gpu def UpperCAmelCase ( self ) -> Dict: self.run_seqaseq_quick(distributed=lowerCAmelCase_ ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCAmelCase ( self ) -> str: self.run_seqaseq_quick(distributed=lowerCAmelCase_ , extra_args_str="""--sharded_ddp simple""" ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCAmelCase ( self ) -> Dict: self.run_seqaseq_quick(distributed=lowerCAmelCase_ , extra_args_str="""--sharded_ddp simple --fp16""" ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCAmelCase ( self ) -> Optional[Any]: self.run_seqaseq_quick(distributed=lowerCAmelCase_ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=lowerCAmelCase_ ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def UpperCAmelCase ( self ) -> Tuple: self.run_seqaseq_quick( distributed=lowerCAmelCase_ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=lowerCAmelCase_ ) @require_apex @require_torch_gpu def UpperCAmelCase ( self ) -> int: # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=lowerCAmelCase_ , extra_args_str="""--fp16 --fp16_backend=apex""" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=lowerCAmelCase_ , extra_args_str="""--fp16 --fp16_backend=apex""" ) @parameterized.expand(["""base""", """low""", """high""", """mixed"""] ) @require_torch_multi_gpu def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout _A = { # test with the default log_level - should be info and thus log info once """base""": {"""extra_args_str""": """""", """n_matches""": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes """low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica """high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1}, # test with high log_level and log_level_replica - should be quiet on all processes """mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0}, } _A = experiments[experiment_id] _A = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False} _A = """Running training""" with CaptureStderr() as cl: self.run_seqaseq_quick(**lowerCAmelCase_ , extra_args_str=data["""extra_args_str"""] ) _A = len(re.findall(lowerCAmelCase_ , cl.err ) ) self.assertEqual(lowerCAmelCase_ , data["""n_matches"""] ) @slow def UpperCAmelCase ( self ) -> Dict: _A = self.run_trainer( eval_steps=2 , max_len=1_28 , model_name=lowerCAmelCase_ , learning_rate=3E-4 , num_train_epochs=10 , distributed=lowerCAmelCase_ , ) # Check metrics _A = TrainerState.load_from_json(os.path.join(lowerCAmelCase_ , """trainer_state.json""" ) ).log_history _A = [log for log in logs if """eval_loss""" in log.keys()] _A = eval_metrics[0] _A = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["""eval_bleu"""] , lowerCAmelCase_ ) # test if do_predict saves generations and metrics _A = os.listdir(lowerCAmelCase_ ) _A = {os.path.basename(lowerCAmelCase_ ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def UpperCAmelCase ( self ) -> Optional[Any]: from transformers.training_args import OptimizerNames def train_and_return_metrics(lowerCAmelCase_ ) -> Tuple[int, float]: _A = """--skip_memory_metrics 0""" _A = self.run_trainer( max_len=1_28 , model_name=lowerCAmelCase_ , learning_rate=3E-4 , num_train_epochs=1 , optim=lowerCAmelCase_ , distributed=lowerCAmelCase_ , extra_args_str=lowerCAmelCase_ , do_eval=lowerCAmelCase_ , do_predict=lowerCAmelCase_ , n_gpus_to_use=1 , ) # Check metrics _A = TrainerState.load_from_json(Path(lowerCAmelCase_ , """trainer_state.json""" ) ).log_history _A = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 ) _A = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 ) _A = logs[0]["""train_loss"""] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss _A , _A , _A = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) _A , _A , _A = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) _A = gpu_alloc_mem_orig - gpu_alloc_mem_bnb _A = gpu_peak_mem_orig + gpu_alloc_mem_orig _A = gpu_peak_mem_bnb + gpu_alloc_mem_bnb _A = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings _A = 1_20 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( lowerCAmelCase_ , lowerCAmelCase_ , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got""" F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and''' F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , ) self.assertGreater( lowerCAmelCase_ , lowerCAmelCase_ , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got""" F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and''' F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , ) self.assertEqual( lowerCAmelCase_ , lowerCAmelCase_ , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 3E-3 , lowerCAmelCase_ = "adafactor" , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = 0 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = None , ) -> str: _A = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro""" _A = self.get_auto_remove_tmp_dir() _A = F''' --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(lowerCAmelCase_ )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(lowerCAmelCase_ )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX '''.split() _A = F''' --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(lowerCAmelCase_ )} '''.split() _A = """ --do_predict """.split() _A = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F'''--optim {optim}'''.split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: _A = get_gpu_count() _A = get_torch_dist_unique_port() _A = F''' -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py '''.split() _A = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(lowerCAmelCase_ , env=self.get_env() ) else: _A = ["""run_translation.py"""] + args with patch.object(lowerCAmelCase_ , """argv""" , lowerCAmelCase_ ): main() return output_dir
180
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) SCREAMING_SNAKE_CASE_:Optional[Any] = { """configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_:Tuple = [ """MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""", """MegaForCausalLM""", """MegaForMaskedLM""", """MegaForMultipleChoice""", """MegaForQuestionAnswering""", """MegaForSequenceClassification""", """MegaForTokenClassification""", """MegaModel""", """MegaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_:Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
358
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_:Dict = { """BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""", # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __lowerCamelCase : int = "altclip_text_model" def __init__( self, lowerCamelCase__=25_0002, lowerCamelCase__=1024, lowerCamelCase__=24, lowerCamelCase__=16, lowerCamelCase__=4096, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=514, lowerCamelCase__=1, lowerCamelCase__=0.02, lowerCamelCase__=0.02, lowerCamelCase__=1e-05, lowerCamelCase__=1, lowerCamelCase__=0, lowerCamelCase__=2, lowerCamelCase__="absolute", lowerCamelCase__=True, lowerCamelCase__=768, **lowerCamelCase__, ): super().__init__(pad_token_id=lowerCamelCase__, bos_token_id=lowerCamelCase__, eos_token_id=lowerCamelCase__, **lowerCamelCase__ ) A : Union[str, Any] = vocab_size A : Dict = hidden_size A : Union[str, Any] = num_hidden_layers A : List[str] = num_attention_heads A : str = hidden_act A : Dict = intermediate_size A : List[str] = hidden_dropout_prob A : Optional[Any] = attention_probs_dropout_prob A : Tuple = max_position_embeddings A : Optional[Any] = type_vocab_size A : Optional[Any] = initializer_range A : Optional[int] = initializer_factor A : Tuple = layer_norm_eps A : List[str] = position_embedding_type A : int = use_cache A : int = project_dim class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __lowerCamelCase : Optional[Any] = "altclip_vision_model" def __init__( self, lowerCamelCase__=768, lowerCamelCase__=3072, lowerCamelCase__=512, lowerCamelCase__=12, lowerCamelCase__=12, lowerCamelCase__=3, lowerCamelCase__=224, lowerCamelCase__=32, lowerCamelCase__="quick_gelu", lowerCamelCase__=1e-5, lowerCamelCase__=0.0, lowerCamelCase__=0.02, lowerCamelCase__=1.0, **lowerCamelCase__, ): super().__init__(**lowerCamelCase__ ) A : Optional[Any] = hidden_size A : Optional[int] = intermediate_size A : Union[str, Any] = projection_dim A : str = num_hidden_layers A : int = num_attention_heads A : Optional[Any] = num_channels A : Tuple = patch_size A : List[Any] = image_size A : Optional[int] = initializer_range A : Union[str, Any] = initializer_factor A : List[str] = attention_dropout A : int = layer_norm_eps A : str = hidden_act @classmethod def _lowerCAmelCase ( cls, lowerCamelCase__, **lowerCamelCase__ ): cls._set_token_in_kwargs(lowerCamelCase__ ) A , A : Optional[Any] = cls.get_config_dict(lowerCamelCase__, **lowerCamelCase__ ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get("""model_type""" ) == "altclip": A : Any = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCamelCase__, **lowerCamelCase__ ) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __lowerCamelCase : List[Any] = "altclip" __lowerCamelCase : List[Any] = True def __init__( self, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=768, lowerCamelCase__=2.6592, **lowerCamelCase__ ): # If `_config_dict` exist, we use them for the backward compatibility. # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). A : Dict = kwargs.pop("""text_config_dict""", lowerCamelCase__ ) A : str = kwargs.pop("""vision_config_dict""", lowerCamelCase__ ) super().__init__(**lowerCamelCase__ ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: A : Dict = {} # This is the complete result when using `text_config_dict`. A : str = AltCLIPTextConfig(**lowerCamelCase__ ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: A : Optional[Any] = ( f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. ''' f'''The value `text_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: A : Optional[int] = ( f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The ''' f'''value `text_config["{key}"]` will be overriden.''' ) logger.warning(lowerCamelCase__ ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: A : int = {} # This is the complete result when using `vision_config_dict`. A : Union[str, Any] = AltCLIPVisionConfig(**lowerCamelCase__ ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: A : Optional[int] = { str(lowerCamelCase__ ): value for key, value in _vision_config_dict["""id2label"""].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: A : Optional[int] = ( f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different ''' f'''values. The value `vision_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: A : Any = ( f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. ''' f'''The value `vision_config["{key}"]` will be overriden.''' ) logger.warning(lowerCamelCase__ ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: A : Tuple = {} logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""" ) if vision_config is None: A : Union[str, Any] = {} logger.info("""`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.""" ) A : Dict = AltCLIPTextConfig(**lowerCamelCase__ ) A : Optional[int] = AltCLIPVisionConfig(**lowerCamelCase__ ) A : List[str] = projection_dim A : Any = logit_scale_init_value A : Tuple = 1.0 @classmethod def _lowerCAmelCase ( cls, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__ ): return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : str = copy.deepcopy(self.__dict__ ) A : Any = self.text_config.to_dict() A : List[str] = self.vision_config.to_dict() A : Union[str, Any] = self.__class__.model_type return output
115
0
"""simple docstring""" def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> list: """simple docstring""" lowerCAmelCase_ : Tuple = len(a_ ) lowerCAmelCase_ : Tuple = [] for i in range(len(a_ ) - pat_len + 1 ): lowerCAmelCase_ : List[Any] = True for j in range(a_ ): if s[i + j] != pattern[j]: lowerCAmelCase_ : Union[str, Any] = False break if match_found: position.append(a_ ) return position if __name__ == "__main__": assert naive_pattern_search("""ABCDEFG""", """DE""") == [3] print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC"""))
241
def A ( a_ ,a_ ,a_ ) -> int: def update_area_of_max_square(a_ ,a_ ) -> int: # BASE CASE if row >= rows or col >= cols: return 0 __UpperCamelCase : Optional[int] =update_area_of_max_square(a_ ,col + 1 ) __UpperCamelCase : List[str] =update_area_of_max_square(row + 1 ,col + 1 ) __UpperCamelCase : List[Any] =update_area_of_max_square(row + 1 ,a_ ) if mat[row][col]: __UpperCamelCase : Optional[Any] =1 + min([right, diagonal, down] ) __UpperCamelCase : Dict =max(largest_square_area[0] ,a_ ) return sub_problem_sol else: return 0 __UpperCamelCase : Union[str, Any] =[0] update_area_of_max_square(0 ,0 ) return largest_square_area[0] def A ( a_ ,a_ ,a_ ) -> int: def update_area_of_max_square_using_dp_array( a_ ,a_ ,a_ ) -> int: if row >= rows or col >= cols: return 0 if dp_array[row][col] != -1: return dp_array[row][col] __UpperCamelCase : Tuple =update_area_of_max_square_using_dp_array(a_ ,col + 1 ,a_ ) __UpperCamelCase : Optional[int] =update_area_of_max_square_using_dp_array(row + 1 ,col + 1 ,a_ ) __UpperCamelCase : Any =update_area_of_max_square_using_dp_array(row + 1 ,a_ ,a_ ) if mat[row][col]: __UpperCamelCase : Optional[Any] =1 + min([right, diagonal, down] ) __UpperCamelCase : str =max(largest_square_area[0] ,a_ ) __UpperCamelCase : Any =sub_problem_sol return sub_problem_sol else: return 0 __UpperCamelCase : Tuple =[0] __UpperCamelCase : List[Any] =[[-1] * cols for _ in range(a_ )] update_area_of_max_square_using_dp_array(0 ,0 ,a_ ) return largest_square_area[0] def A ( a_ ,a_ ,a_ ) -> int: __UpperCamelCase : Dict =[[0] * (cols + 1) for _ in range(rows + 1 )] __UpperCamelCase : int =0 for row in range(rows - 1 ,-1 ,-1 ): for col in range(cols - 1 ,-1 ,-1 ): __UpperCamelCase : Optional[Any] =dp_array[row][col + 1] __UpperCamelCase : int =dp_array[row + 1][col + 1] __UpperCamelCase : Tuple =dp_array[row + 1][col] if mat[row][col] == 1: __UpperCamelCase : Tuple =1 + min(a_ ,a_ ,a_ ) __UpperCamelCase : Any =max(dp_array[row][col] ,a_ ) else: __UpperCamelCase : Dict =0 return largest_square_area def A ( a_ ,a_ ,a_ ) -> int: __UpperCamelCase : Any =[0] * (cols + 1) __UpperCamelCase : List[Any] =[0] * (cols + 1) __UpperCamelCase : Tuple =0 for row in range(rows - 1 ,-1 ,-1 ): for col in range(cols - 1 ,-1 ,-1 ): __UpperCamelCase : Any =current_row[col + 1] __UpperCamelCase : Optional[Any] =next_row[col + 1] __UpperCamelCase : Union[str, Any] =next_row[col] if mat[row][col] == 1: __UpperCamelCase : Any =1 + min(a_ ,a_ ,a_ ) __UpperCamelCase : Optional[int] =max(current_row[col] ,a_ ) else: __UpperCamelCase : List[str] =0 __UpperCamelCase : Optional[Any] =current_row return largest_square_area if __name__ == "__main__": import doctest doctest.testmod() print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
71
0
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowerCAmelCase ( A__ , unittest.TestCase ): """simple docstring""" snake_case_ = KandinskyVaaControlnetImgaImgPipeline snake_case_ = ["image_embeds", "negative_image_embeds", "image", "hint"] snake_case_ = ["image_embeds", "negative_image_embeds", "image", "hint"] snake_case_ = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] snake_case_ = False @property def lowerCAmelCase ( self : Dict )-> str: return 32 @property def lowerCAmelCase ( self : int )-> List[str]: return 32 @property def lowerCAmelCase ( self : List[Any] )-> str: return self.time_input_dim @property def lowerCAmelCase ( self : Optional[Any] )-> Any: return self.time_input_dim * 4 @property def lowerCAmelCase ( self : str )-> Union[str, Any]: return 1_00 @property def lowerCAmelCase ( self : Tuple )-> Optional[Any]: torch.manual_seed(0 ) snake_case = { """in_channels""": 8, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image_hint""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } snake_case = UNetaDConditionModel(**__snake_case ) return model @property def lowerCAmelCase ( self : List[Any] )-> str: return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def lowerCAmelCase ( self : str )-> List[str]: torch.manual_seed(0 ) snake_case = VQModel(**self.dummy_movq_kwargs ) return model def lowerCAmelCase ( self : int )-> Dict: snake_case = self.dummy_unet snake_case = self.dummy_movq snake_case = { """num_train_timesteps""": 10_00, """beta_schedule""": """linear""", """beta_start""": 0.0_00_85, """beta_end""": 0.0_12, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } snake_case = DDIMScheduler(**__snake_case ) snake_case = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def lowerCAmelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : Tuple=0 )-> List[Any]: snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__snake_case ) ).to(__snake_case ) snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __snake_case ) # create init_image snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case ) snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0] snake_case = Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) ) # create hint snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case ) if str(__snake_case ).startswith("""mps""" ): snake_case = torch.manual_seed(__snake_case ) else: snake_case = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) snake_case = { """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """hint""": hint, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def lowerCAmelCase ( self : Dict )-> Optional[int]: snake_case = """cpu""" snake_case = self.get_dummy_components() snake_case = self.pipeline_class(**__snake_case ) snake_case = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) snake_case = pipe(**self.get_dummy_inputs(__snake_case ) ) snake_case = output.images snake_case = pipe( **self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case = np.array( [0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : List[str] )-> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : List[Any] )-> Optional[int]: snake_case = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" ) snake_case = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) snake_case = init_image.resize((5_12, 5_12) ) snake_case = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/hint_image_cat.png""" ) snake_case = torch.from_numpy(np.array(__snake_case ) ).float() / 2_55.0 snake_case = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) snake_case = """A robot, 4k photo""" snake_case = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(__snake_case ) snake_case = KandinskyVaaControlnetImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa ) snake_case = pipeline.to(__snake_case ) pipeline.set_progress_bar_config(disable=__snake_case ) snake_case = torch.Generator(device="""cpu""" ).manual_seed(0 ) snake_case , snake_case = pipe_prior( __snake_case , image=__snake_case , strength=0.85 , generator=__snake_case , negative_prompt="""""" , ).to_tuple() snake_case = pipeline( image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , hint=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type="""np""" , ) snake_case = output.images[0] assert image.shape == (5_12, 5_12, 3) assert_mean_pixel_difference(__snake_case , __snake_case )
356
'''simple docstring''' import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt"} _SCREAMING_SNAKE_CASE = { "vocab_file": { "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt", }, } _SCREAMING_SNAKE_CASE = { "openbmb/cpm-ant-10b": 1024, } def __lowerCamelCase ( __lowerCAmelCase : List[Any] ) -> str: snake_case = collections.OrderedDict() with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" ) as reader: snake_case = reader.readlines() for index, token in enumerate(__lowerCAmelCase ): snake_case = token.rstrip("""\n""" ) snake_case = index return vocab class _lowerCAmelCase ( A__ ): """simple docstring""" def __init__( self : Optional[int] , __snake_case : int , __snake_case : Union[str, Any]="<unk>" , __snake_case : Union[str, Any]=2_00 )-> List[str]: snake_case = vocab snake_case = unk_token snake_case = max_input_chars_per_word def lowerCAmelCase ( self : Any , __snake_case : List[str] )-> List[Any]: snake_case = list(__snake_case ) if len(__snake_case ) > self.max_input_chars_per_word: return [self.unk_token] snake_case = 0 snake_case = [] while start < len(__snake_case ): snake_case = len(__snake_case ) snake_case = None while start < end: snake_case = """""".join(chars[start:end] ) if substr in self.vocab: snake_case = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(__snake_case ) snake_case = end return sub_tokens class _lowerCAmelCase ( A__ ): """simple docstring""" snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["input_ids", "attention_mask"] snake_case_ = False def __init__( self : int , __snake_case : Tuple , __snake_case : Optional[int]="<d>" , __snake_case : int="</d>" , __snake_case : List[Any]="<s>" , __snake_case : List[str]="</s>" , __snake_case : str="<pad>" , __snake_case : Union[str, Any]="<unk>" , __snake_case : str="</n>" , __snake_case : List[str]="</_>" , __snake_case : Union[str, Any]="left" , **__snake_case : Tuple , )-> Union[str, Any]: requires_backends(self , ["""jieba"""] ) super().__init__( bod_token=__snake_case , eod_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , pad_token=__snake_case , unk_token=__snake_case , line_token=__snake_case , space_token=__snake_case , padding_side=__snake_case , **__snake_case , ) snake_case = bod_token snake_case = eod_token snake_case = load_vocab(__snake_case ) snake_case = self.encoder[space_token] snake_case = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] snake_case = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) ) snake_case = {v: k for k, v in self.encoder.items()} snake_case = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def lowerCAmelCase ( self : Optional[int] )-> List[Any]: return self.encoder[self.bod_token] @property def lowerCAmelCase ( self : str )-> Tuple: return self.encoder[self.eod_token] @property def lowerCAmelCase ( self : str )-> List[str]: return self.encoder["\n"] @property def lowerCAmelCase ( self : List[Any] )-> int: return len(self.encoder ) def lowerCAmelCase ( self : Any )-> Any: return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self : Tuple , __snake_case : Any )-> Union[str, Any]: snake_case = [] for x in jieba.cut(__snake_case , cut_all=__snake_case ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(__snake_case ) ) return output_tokens def lowerCAmelCase ( self : str , __snake_case : Tuple , **__snake_case : Dict )-> Optional[int]: snake_case = [i for i in token_ids if i >= 0] snake_case = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(__snake_case , **__snake_case ) def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Dict )-> Optional[int]: return token in self.encoder def lowerCAmelCase ( self : Optional[Any] , __snake_case : List[str] )-> str: return "".join(__snake_case ) def lowerCAmelCase ( self : Tuple , __snake_case : int )-> Optional[int]: return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self : str , __snake_case : List[Any] )-> str: return self.decoder.get(__snake_case , self.unk_token ) def lowerCAmelCase ( self : int , __snake_case : str , __snake_case : Optional[str] = None )-> Tuple[str]: if os.path.isdir(__snake_case ): snake_case = os.path.join( __snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) else: snake_case = (filename_prefix + """-""" if filename_prefix else """""") + save_directory snake_case = 0 if " " in self.encoder: snake_case = self.encoder[""" """] del self.encoder[" "] if "\n" in self.encoder: snake_case = self.encoder["""\n"""] del self.encoder["\n"] snake_case = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) ) with open(__snake_case , """w""" , encoding="""utf-8""" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' """ Please check that the vocabulary is not corrupted!""" ) snake_case = token_index writer.write(token + """\n""" ) index += 1 return (vocab_file,) def lowerCAmelCase ( self : Dict , __snake_case : List[int] , __snake_case : List[int] = None )-> List[int]: if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def lowerCAmelCase ( self : str , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False )-> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case ) if token_ids_a is not None: return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case )) return [1] + ([0] * len(__snake_case ))
3
0
"""simple docstring""" import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class __A : '''simple docstring''' def __init__( self : Tuple ,_snake_case : Union[str, Any] ,_snake_case : int=13 ,_snake_case : Tuple=7 ,_snake_case : List[Any]=True ,_snake_case : int=True ,_snake_case : Union[str, Any]=False ,_snake_case : List[Any]=True ,_snake_case : Any=99 ,_snake_case : str=32 ,_snake_case : int=5 ,_snake_case : int=4 ,_snake_case : int=37 ,_snake_case : Tuple="gelu" ,_snake_case : int=0.1 ,_snake_case : List[str]=0.1 ,_snake_case : Union[str, Any]=512 ,_snake_case : Optional[int]=16 ,_snake_case : int=2 ,_snake_case : List[Any]=0.02 ,_snake_case : Optional[int]=3 ,_snake_case : List[str]=4 ,_snake_case : Union[str, Any]=None ,) -> int: """simple docstring""" lowercase__ : Optional[int] = parent lowercase__ : Dict = batch_size lowercase__ : int = seq_length lowercase__ : Optional[int] = is_training lowercase__ : Optional[int] = use_input_mask lowercase__ : str = use_token_type_ids lowercase__ : Optional[int] = use_labels lowercase__ : List[Any] = vocab_size lowercase__ : List[Any] = hidden_size lowercase__ : List[str] = num_hidden_layers lowercase__ : Optional[int] = num_attention_heads lowercase__ : Union[str, Any] = intermediate_size lowercase__ : Any = hidden_act lowercase__ : Optional[Any] = hidden_dropout_prob lowercase__ : str = attention_probs_dropout_prob lowercase__ : Any = max_position_embeddings lowercase__ : Dict = type_vocab_size lowercase__ : Union[str, Any] = type_sequence_label_size lowercase__ : Dict = initializer_range lowercase__ : str = num_labels lowercase__ : Optional[Any] = num_choices lowercase__ : Union[str, Any] = scope def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase__ : Optional[Any] = None if self.use_input_mask: lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Union[str, Any] = None if self.use_token_type_ids: lowercase__ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) lowercase__ : Tuple = None lowercase__ : List[str] = None lowercase__ : Tuple = None if self.use_labels: lowercase__ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowercase__ : str = ids_tensor([self.batch_size] ,self.num_choices ) lowercase__ : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self : str ) -> Union[str, Any]: """simple docstring""" return LlamaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_snake_case ,initializer_range=self.initializer_range ,) def UpperCAmelCase ( self : Optional[int] ,_snake_case : List[str] ,_snake_case : Dict ,_snake_case : Optional[int] ,_snake_case : Dict ,_snake_case : Dict ,_snake_case : List[Any] ,_snake_case : str ) -> Any: """simple docstring""" lowercase__ : Any = LlamaModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : List[str] = model(_snake_case ,attention_mask=_snake_case ) lowercase__ : Optional[Any] = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self : Dict ,_snake_case : Tuple ,_snake_case : int ,_snake_case : List[str] ,_snake_case : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Optional[Any] ,_snake_case : Optional[int] ,) -> List[str]: """simple docstring""" lowercase__ : int = True lowercase__ : Optional[int] = LlamaModel(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Optional[Any] = model( _snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,) lowercase__ : List[Any] = model( _snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,) lowercase__ : List[Any] = model(_snake_case ,attention_mask=_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self : Dict ,_snake_case : str ,_snake_case : Any ,_snake_case : str ,_snake_case : int ,_snake_case : Dict ,_snake_case : Optional[Any] ,_snake_case : Optional[int] ,_snake_case : str ,_snake_case : Any ,) -> Union[str, Any]: """simple docstring""" lowercase__ : List[Any] = LlamaForCausalLM(config=_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Union[str, Any] = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self : Tuple ,_snake_case : int ,_snake_case : Optional[Any] ,_snake_case : List[str] ,_snake_case : str ,_snake_case : Dict ,_snake_case : Dict ,_snake_case : Tuple ,_snake_case : str ,_snake_case : Any ,) -> Dict: """simple docstring""" lowercase__ : Optional[Any] = True lowercase__ : Dict = True lowercase__ : Optional[int] = LlamaForCausalLM(config=_snake_case ) model.to(_snake_case ) model.eval() # first forward pass lowercase__ : Any = model( _snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,use_cache=_snake_case ,) lowercase__ : Optional[int] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowercase__ : List[Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) lowercase__ : Tuple = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and lowercase__ : Union[str, Any] = torch.cat([input_ids, next_tokens] ,dim=-1 ) lowercase__ : Dict = torch.cat([input_mask, next_mask] ,dim=-1 ) lowercase__ : Optional[int] = model( _snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,output_hidden_states=_snake_case ,)['''hidden_states'''][0] lowercase__ : str = model( _snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,past_key_values=_snake_case ,output_hidden_states=_snake_case ,)['''hidden_states'''][0] # select random slice lowercase__ : Optional[Any] = ids_tensor((1,) ,output_from_past.shape[-1] ).item() lowercase__ : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach() lowercase__ : str = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_snake_case ,_snake_case ,atol=1e-3 ) ) def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" lowercase__ : Optional[Any] = self.prepare_config_and_inputs() ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) : Union[str, Any] = config_and_inputs lowercase__ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __A ( A_ ,A_ ,A_ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : List[Any] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () lowerCAmelCase : int = (LlamaForCausalLM,) if is_torch_available() else () lowerCAmelCase : int = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase : List[str] = False lowerCAmelCase : List[Any] = False def UpperCAmelCase ( self : Tuple ) -> str: """simple docstring""" lowercase__ : str = LlamaModelTester(self ) lowercase__ : List[Any] = ConfigTester(self ,config_class=_snake_case ,hidden_size=37 ) def UpperCAmelCase ( self : Any ) -> str: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase ( self : Dict ) -> Dict: """simple docstring""" lowercase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCAmelCase ( self : Any ) -> str: """simple docstring""" lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase__ : List[str] = type self.model_tester.create_and_check_model(*_snake_case ) def UpperCAmelCase ( self : Dict ) -> Optional[int]: """simple docstring""" lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = 3 lowercase__ : Tuple = input_dict['''input_ids'''] lowercase__ : Tuple = input_ids.ne(1 ).to(_snake_case ) lowercase__ : List[str] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) lowercase__ : List[Any] = LlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Optional[Any] = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = 3 lowercase__ : int = '''single_label_classification''' lowercase__ : List[str] = input_dict['''input_ids'''] lowercase__ : Any = input_ids.ne(1 ).to(_snake_case ) lowercase__ : Tuple = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) lowercase__ : Optional[Any] = LlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Optional[Any] = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase ( self : Optional[int] ) -> int: """simple docstring""" lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Tuple = 3 lowercase__ : List[Any] = '''multi_label_classification''' lowercase__ : Any = input_dict['''input_ids'''] lowercase__ : Dict = input_ids.ne(1 ).to(_snake_case ) lowercase__ : int = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) lowercase__ : Union[str, Any] = LlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : int = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' ) def UpperCAmelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" pass @parameterized.expand([('''linear''',), ('''dynamic''',)] ) def UpperCAmelCase ( self : Optional[Any] ,_snake_case : str ) -> Optional[Any]: """simple docstring""" lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = ids_tensor([1, 10] ,config.vocab_size ) lowercase__ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowercase__ : Optional[int] = LlamaModel(_snake_case ) original_model.to(_snake_case ) original_model.eval() lowercase__ : Dict = original_model(_snake_case ).last_hidden_state lowercase__ : str = original_model(_snake_case ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowercase__ : List[str] = {'''type''': scaling_type, '''factor''': 10.0} lowercase__ : Tuple = LlamaModel(_snake_case ) scaled_model.to(_snake_case ) scaled_model.eval() lowercase__ : Union[str, Any] = scaled_model(_snake_case ).last_hidden_state lowercase__ : Tuple = scaled_model(_snake_case ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_snake_case ,_snake_case ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(_snake_case ,_snake_case ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_snake_case ,_snake_case ,atol=1e-5 ) ) @require_torch class __A ( unittest.TestCase ): '''simple docstring''' @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def UpperCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" lowercase__ : List[str] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] lowercase__ : Dict = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' ,device_map='''auto''' ) lowercase__ : Optional[Any] = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 lowercase__ : Optional[int] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) ,_snake_case ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowercase__ : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,_snake_case ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ : str = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] lowercase__ : Union[str, Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' ,device_map='''auto''' ) lowercase__ : List[Any] = model(torch.tensor(_snake_case ) ) # Expected mean on dim = -1 lowercase__ : Tuple = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) ,_snake_case ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowercase__ : Optional[Any] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,_snake_case ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def UpperCAmelCase ( self : Tuple ) -> Any: """simple docstring""" lowercase__ : Union[str, Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] lowercase__ : Tuple = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' ,device_map='''auto''' ) lowercase__ : str = model(torch.tensor(_snake_case ) ) # Expected mean on dim = -1 lowercase__ : List[Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) ,_snake_case ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowercase__ : Optional[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) ,_snake_case ,atol=1e-2 ,rtol=1e-2 ) @unittest.skip( '''Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test''' ) @slow def UpperCAmelCase ( self : Optional[Any] ) -> Any: """simple docstring""" lowercase__ : Tuple = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] lowercase__ : Tuple = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' ,device_map='''auto''' ) lowercase__ : Optional[Any] = model(torch.tensor(_snake_case ) ) lowercase__ : Union[str, Any] = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) ,_snake_case ,atol=1e-2 ,rtol=1e-2 ) # fmt: off lowercase__ : Optional[Any] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,_snake_case ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('''Model is curently gated''' ) @slow def UpperCAmelCase ( self : Tuple ) -> Optional[int]: """simple docstring""" lowercase__ : Any = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi''' lowercase__ : str = '''Simply put, the theory of relativity states that ''' lowercase__ : List[Any] = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' ) lowercase__ : int = tokenizer.encode(_snake_case ,return_tensors='''pt''' ) lowercase__ : Any = LlamaForCausalLM.from_pretrained( '''meta-llama/Llama-2-13b-chat-hf''' ,device_map='''sequential''' ,use_safetensors=_snake_case ) # greedy generation outputs lowercase__ : List[str] = model.generate(_snake_case ,max_new_tokens=64 ,top_p=_snake_case ,temperature=1 ,do_sample=_snake_case ) lowercase__ : Any = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_snake_case ) self.assertEqual(_snake_case ,_snake_case )
16
"""simple docstring""" from ..utils import DummyObject, requires_backends class __A ( metaclass=A_ ): '''simple docstring''' lowerCAmelCase : List[str] = ["torch", "torchsde"] def __init__( self : Tuple ,*_snake_case : Union[str, Any] ,**_snake_case : Any ) -> Union[str, Any]: """simple docstring""" requires_backends(self ,['''torch''', '''torchsde'''] ) @classmethod def UpperCAmelCase ( cls : List[str] ,*_snake_case : int ,**_snake_case : Union[str, Any] ) -> str: """simple docstring""" requires_backends(cls ,['''torch''', '''torchsde'''] ) @classmethod def UpperCAmelCase ( cls : List[Any] ,*_snake_case : List[Any] ,**_snake_case : List[str] ) -> List[Any]: """simple docstring""" requires_backends(cls ,['''torch''', '''torchsde'''] )
16
1
'''simple docstring''' def a ( __a = 3 , __a = 7 , __a = 1000000 ) -> Any: '''simple docstring''' UpperCamelCase__ :List[str] = 0 UpperCamelCase__ :List[str] = 1 for current_denominator in range(1 , limit + 1 ): UpperCamelCase__ :Tuple = current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: UpperCamelCase__ :List[str] = current_numerator UpperCamelCase__ :Dict = current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=1000000))
360
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __snake_case = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def a ( __a ) -> Optional[int]: '''simple docstring''' from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(__a ) def a ( __a ) -> str: '''simple docstring''' from diffusers.utils.testing_utils import pytest_terminal_summary_main UpperCamelCase__ :Union[str, Any] = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(__a , id=__a )
219
0
'''simple docstring''' import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''label_embs_concat''': '''label_embeddings_concat''', '''mask_emb''': '''masked_spec_embed''', '''spk_proj''': '''speaker_proj''', } lowerCAmelCase__ = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', '''label_embeddings_concat''', '''speaker_proj''', '''layer_norm_for_extract''', ] def _A ( A__ , A__ , A__ , A__ , A__ ): """simple docstring""" for attribute in key.split('''.''' ): __lowercase = getattr(A__ , A__ ) if weight_type is not None: __lowercase = getattr(A__ , A__ ).shape else: __lowercase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": __lowercase = value elif weight_type == "weight_g": __lowercase = value elif weight_type == "weight_v": __lowercase = value elif weight_type == "bias": __lowercase = value else: __lowercase = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def _A ( A__ , A__ ): """simple docstring""" __lowercase = [] __lowercase = fairseq_model.state_dict() __lowercase = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): __lowercase = False if "conv_layers" in name: load_conv_layer( A__ , A__ , A__ , A__ , hf_model.config.feat_extract_norm == '''group''' , ) __lowercase = True else: for key, mapped_key in MAPPING.items(): __lowercase = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue __lowercase = True if "*" in mapped_key: __lowercase = name.split(A__ )[0].split('''.''' )[-2] __lowercase = mapped_key.replace('''*''' , A__ ) if "weight_g" in name: __lowercase = '''weight_g''' elif "weight_v" in name: __lowercase = '''weight_v''' elif "bias" in name: __lowercase = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowercase = '''weight''' else: __lowercase = None set_recursively(A__ , A__ , A__ , A__ , A__ ) continue if not is_used: unused_weights.append(A__ ) logger.warning(F"Unused weights: {unused_weights}" ) def _A ( A__ , A__ , A__ , A__ , A__ ): """simple docstring""" __lowercase = full_name.split('''conv_layers.''' )[-1] __lowercase = name.split('''.''' ) __lowercase = int(items[0] ) __lowercase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __lowercase = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __lowercase = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." ) __lowercase = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) __lowercase = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(A__ ) @torch.no_grad() def _A ( A__ , A__ , A__=None , A__=None , A__=True ): """simple docstring""" if config_path is not None: __lowercase = UniSpeechSatConfig.from_pretrained(A__ ) else: __lowercase = UniSpeechSatConfig() __lowercase = '''''' if is_finetuned: __lowercase = UniSpeechSatForCTC(A__ ) else: __lowercase = UniSpeechSatForPreTraining(A__ ) __lowercase , __lowercase , __lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) __lowercase = model[0].eval() recursively_load_weights(A__ , A__ ) hf_wavavec.save_pretrained(A__ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) lowerCAmelCase__ = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
104
'''simple docstring''' import numpy as np import pandas as pd from sklearn.preprocessing import Normalizer from sklearn.svm import SVR from statsmodels.tsa.statespace.sarimax import SARIMAX def snake_case ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )-> float: """simple docstring""" __A = np.array([[1, item, train_mtch[i]] for i, item in enumerate(UpperCAmelCase )] ) __A = np.array(UpperCAmelCase ) __A = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , UpperCAmelCase ) ) , x.transpose() ) , UpperCAmelCase ) return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] ) def snake_case ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )-> float: """simple docstring""" __A = (1, 2, 1) __A = (1, 1, 0, 7) __A = SARIMAX( UpperCAmelCase , exog=UpperCAmelCase , order=UpperCAmelCase , seasonal_order=UpperCAmelCase ) __A = model.fit(disp=UpperCAmelCase , maxiter=6_0_0 , method='nm' ) __A = model_fit.predict(1 , len(UpperCAmelCase ) , exog=[test_match] ) return result[0] def snake_case ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )-> float: """simple docstring""" __A = SVR(kernel='rbf' , C=1 , gamma=0.1 , epsilon=0.1 ) regressor.fit(UpperCAmelCase , UpperCAmelCase ) __A = regressor.predict(UpperCAmelCase ) return y_pred[0] def snake_case ( UpperCAmelCase )-> float: """simple docstring""" train_user.sort() __A = np.percentile(UpperCAmelCase , 2_5 ) __A = np.percentile(UpperCAmelCase , 7_5 ) __A = qa - qa __A = qa - (iqr * 0.1) return low_lim def snake_case ( UpperCAmelCase , UpperCAmelCase )-> bool: """simple docstring""" __A = 0 __A = 0 for i in list_vote: if i > actual_result: __A = not_safe + 1 else: if abs(abs(UpperCAmelCase ) - abs(UpperCAmelCase ) ) <= 0.1: safe += 1 else: not_safe += 1 return safe > not_safe if __name__ == "__main__": # data_input_df = pd.read_csv("ex_data.csv", header=None) a__ : List[str] = [[1_8_2_3_1, 0.0, 1], [2_2_6_2_1, 1.0, 2], [1_5_6_7_5, 0.0, 3], [2_3_5_8_3, 1.0, 4]] a__ : Optional[int] = pd.DataFrame( data_input, columns=["total_user", "total_even", "days"] ) a__ : List[Any] = Normalizer().fit_transform(data_input_df.values) # split data a__ : Dict = normalize_df[:, 2].tolist() a__ : Optional[int] = normalize_df[:, 0].tolist() a__ : str = normalize_df[:, 1].tolist() # for svr (input variable = total date and total match) a__ : Tuple = normalize_df[:, [1, 2]].tolist() a__ : Dict = x[: len(x) - 1] a__ : Any = x[len(x) - 1 :] # for linear regression & sarimax a__ : Tuple = total_date[: len(total_date) - 1] a__ : List[Any] = total_user[: len(total_user) - 1] a__ : List[Any] = total_match[: len(total_match) - 1] a__ : List[str] = total_date[len(total_date) - 1 :] a__ : List[str] = total_user[len(total_user) - 1 :] a__ : Tuple = total_match[len(total_match) - 1 :] # voting system with forecasting a__ : Optional[Any] = [ linear_regression_prediction( trn_date, trn_user, trn_match, tst_date, tst_match ), sarimax_predictor(trn_user, trn_match, tst_match), support_vector_regressor(x_train, x_test, trn_user), ] # check the safety of today's data a__ : List[str] = "" if data_safety_checker(res_vote, tst_user) else "not " print("Today's data is {not_str}safe.")
161
0
_lowerCAmelCase : Any = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any ) -> str: # Return True if there is node that has not iterated. A_ : str = [False] * len(_lowerCAmelCase ) A_ : Optional[int] = [s] A_ : Any = True while queue: A_ : str = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_lowerCAmelCase ) A_ : List[str] = True A_ : Optional[int] = u return visited[t] def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str ) -> Union[str, Any]: A_ : Optional[int] = [-1] * (len(_lowerCAmelCase )) A_ : Optional[Any] = 0 A_ : Optional[int] = [] A_ : Any = [i[:] for i in graph] # Record original cut, copy. while bfs(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): A_ : Union[str, Any] = float("Inf" ) A_ : Optional[Any] = sink while s != source: # Find the minimum value in select path A_ : Optional[int] = min(_lowerCAmelCase , graph[parent[s]][s] ) A_ : Tuple = parent[s] max_flow += path_flow A_ : str = sink while v != source: A_ : List[str] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow A_ : int = parent[v] for i in range(len(_lowerCAmelCase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
70
from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def __snake_case ( ) -> tuple[list[int], int]: A_ : Dict = [randint(-1000 , 1000 ) for i in range(10 )] A_ : List[str] = randint(-5000 , 5000 ) return (arr, r) _lowerCAmelCase : List[Any] = make_dataset() def __snake_case ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int ) -> tuple[int, ...]: for triplet in permutations(_lowerCAmelCase , 3 ): if sum(_lowerCAmelCase ) == target: return tuple(sorted(_lowerCAmelCase ) ) return (0, 0, 0) def __snake_case ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int ) -> tuple[int, int, int]: arr.sort() A_ : Tuple = len(_lowerCAmelCase ) for i in range(n - 1 ): A_ , A_ : int = i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def __snake_case ( ) -> tuple[float, float]: A_ : Union[str, Any] = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n" A_ : Tuple = "\ntriplet_sum1(*dataset)\n" A_ : Optional[Any] = "\ntriplet_sum2(*dataset)\n" A_ : List[str] = repeat(setup=_lowerCAmelCase , stmt=_lowerCAmelCase , repeat=5 , number=10000 ) A_ : Tuple = repeat(setup=_lowerCAmelCase , stmt=_lowerCAmelCase , repeat=5 , number=10000 ) return (min(_lowerCAmelCase ), min(_lowerCAmelCase )) if __name__ == "__main__": from doctest import testmod testmod() _lowerCAmelCase : Optional[Any] = solution_times() print(F'''The time for naive implementation is {times[0]}.''') print(F'''The time for optimized implementation is {times[1]}.''')
70
1
from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class _A ( nn.Module ): def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int = 16 , __SCREAMING_SNAKE_CASE : int = 88 , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : int = 32 , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : str = "geglu" , __SCREAMING_SNAKE_CASE : Optional[int] = None , ): '''simple docstring''' super().__init__() __a = nn.ModuleList( [ TransformeraDModel( num_attention_heads=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , in_channels=__SCREAMING_SNAKE_CASE , num_layers=__SCREAMING_SNAKE_CASE , dropout=__SCREAMING_SNAKE_CASE , norm_num_groups=__SCREAMING_SNAKE_CASE , cross_attention_dim=__SCREAMING_SNAKE_CASE , attention_bias=__SCREAMING_SNAKE_CASE , sample_size=__SCREAMING_SNAKE_CASE , num_vector_embeds=__SCREAMING_SNAKE_CASE , activation_fn=__SCREAMING_SNAKE_CASE , num_embeds_ada_norm=__SCREAMING_SNAKE_CASE , ) for _ in range(2) ]) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference __a = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` __a = [77, 257] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` __a = [1, 0] def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : bool = True , ): '''simple docstring''' __a = hidden_states __a = [] __a = 0 # attention_mask is not used yet for i in range(2): # for each of the two transformers, pass the corresponding condition tokens __a = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] __a = self.transformer_index_for_condition[i] __a = self.transformers[transformer_index]( __SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE , cross_attention_kwargs=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )[0] encoded_states.append(encoded_state - input_states) tokens_start += self.condition_lengths[i] __a = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) __a = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=__SCREAMING_SNAKE_CASE)
49
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( ImageTextPipelineOutput, UniDiffuserPipeline, ) else: from .modeling_text_decoder import UniDiffuserTextDecoder from .modeling_uvit import UniDiffuserModel, UTransformeraDModel from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
115
0
from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance UpperCamelCase__ = 6378137.0 UpperCamelCase__ = 6356752.314245 UpperCamelCase__ = 6378137 def _a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ): __lowerCAmelCase = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude __lowerCAmelCase = atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE_ ) ) ) __lowerCAmelCase = atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE_ ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius __lowerCAmelCase = haversine_distance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) / EQUATORIAL_RADIUS # Intermediate P and Q values __lowerCAmelCase = (b_lata + b_lata) / 2 __lowerCAmelCase = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) __lowerCAmelCase = (sin(SCREAMING_SNAKE_CASE_ ) ** 2) * (cos(SCREAMING_SNAKE_CASE_ ) ** 2) __lowerCAmelCase = cos(sigma / 2 ) ** 2 __lowerCAmelCase = (sigma - sin(SCREAMING_SNAKE_CASE_ )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) __lowerCAmelCase = (cos(SCREAMING_SNAKE_CASE_ ) ** 2) * (sin(SCREAMING_SNAKE_CASE_ ) ** 2) __lowerCAmelCase = sin(sigma / 2 ) ** 2 __lowerCAmelCase = (sigma + sin(SCREAMING_SNAKE_CASE_ )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
354
def _a ( SCREAMING_SNAKE_CASE_ : List[Any] ): __lowerCAmelCase , __lowerCAmelCase = [], [] while len(SCREAMING_SNAKE_CASE_ ) > 1: __lowerCAmelCase , __lowerCAmelCase = min(SCREAMING_SNAKE_CASE_ ), max(SCREAMING_SNAKE_CASE_ ) start.append(SCREAMING_SNAKE_CASE_ ) end.append(SCREAMING_SNAKE_CASE_ ) collection.remove(SCREAMING_SNAKE_CASE_ ) collection.remove(SCREAMING_SNAKE_CASE_ ) end.reverse() return start + collection + end if __name__ == "__main__": UpperCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip() UpperCamelCase__ = [int(item) for item in user_input.split(""",""")] print(*merge_sort(unsorted), sep=""",""")
102
0
from __future__ import annotations def _UpperCAmelCase ( a__ , a__): '''simple docstring''' if len(snake_case__) < k or k < 0: raise ValueError("""Invalid Input""") a_ : Any = sum(array[:k]) for i in range(len(snake_case__) - k): a_ : Union[str, Any] = current_sum - array[i] + array[i + k] a_ : List[Any] = max(snake_case__ , snake_case__) return max_sum if __name__ == "__main__": from doctest import testmod from random import randint testmod() __snake_case : int = [randint(-10_00, 10_00) for i in range(1_00)] __snake_case : List[str] = randint(0, 1_10) print(F"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
248
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class A ( __snake_case ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
3
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCamelCase : Any ={ '''configuration_efficientformer''': [ '''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EfficientFormerConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Tuple =['''EfficientFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : int =[ '''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EfficientFormerForImageClassification''', '''EfficientFormerForImageClassificationWithTeacher''', '''EfficientFormerModel''', '''EfficientFormerPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : List[Any] =[ '''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFEfficientFormerForImageClassification''', '''TFEfficientFormerForImageClassificationWithTeacher''', '''TFEfficientFormerModel''', '''TFEfficientFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys lowerCamelCase : List[str] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
196
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class __a ( unittest.TestCase ): def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCamelCase__ : Dict = tempfile.mkdtemp() # fmt: off UpperCamelCase__ : List[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on UpperCamelCase__ : List[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) ) UpperCamelCase__ : Tuple = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] UpperCamelCase__ : Tuple = {"unk_token": "<unk>"} UpperCamelCase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCamelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(SCREAMING_SNAKE_CASE ) ) UpperCamelCase__ : List[str] = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], "image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } UpperCamelCase__ : int = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowercase ( self : List[str] , **SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def __lowercase ( self : List[Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def __lowercase ( self : Any , **SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' return CLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __lowercase ( self : Any ): '''simple docstring''' UpperCamelCase__ : int = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] UpperCamelCase__ : int = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs] return image_inputs def __lowercase ( self : Tuple ): '''simple docstring''' UpperCamelCase__ : Union[str, Any] = self.get_tokenizer() UpperCamelCase__ : List[str] = self.get_rust_tokenizer() UpperCamelCase__ : str = self.get_image_processor() UpperCamelCase__ : List[str] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) processor_slow.save_pretrained(self.tmpdirname ) UpperCamelCase__ : List[Any] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) processor_fast.save_pretrained(self.tmpdirname ) UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE ) self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE ) self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE ) def __lowercase ( self : List[str] ): '''simple docstring''' UpperCamelCase__ : str = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCamelCase__ : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) UpperCamelCase__ : int = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 ) UpperCamelCase__ : Tuple = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCamelCase__ : List[str] = self.get_image_processor() UpperCamelCase__ : Union[str, Any] = self.get_tokenizer() UpperCamelCase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Dict = self.prepare_image_inputs() UpperCamelCase__ : List[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="np" ) UpperCamelCase__ : Optional[Any] = processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCamelCase__ : str = self.get_image_processor() UpperCamelCase__ : int = self.get_tokenizer() UpperCamelCase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) UpperCamelCase__ : List[Any] = "lower newer" UpperCamelCase__ : int = processor(text=SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Tuple = tokenizer(SCREAMING_SNAKE_CASE ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __lowercase ( self : int ): '''simple docstring''' UpperCamelCase__ : List[str] = self.get_image_processor() UpperCamelCase__ : Dict = self.get_tokenizer() UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Dict = "lower newer" UpperCamelCase__ : List[Any] = self.prepare_image_inputs() UpperCamelCase__ : Tuple = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE ): processor() def __lowercase ( self : Optional[int] ): '''simple docstring''' UpperCamelCase__ : Optional[Any] = self.get_image_processor() UpperCamelCase__ : Optional[int] = self.get_tokenizer() UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) UpperCamelCase__ : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCamelCase__ : Optional[Any] = processor.batch_decode(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowercase ( self : List[str] ): '''simple docstring''' UpperCamelCase__ : Dict = self.get_image_processor() UpperCamelCase__ : Tuple = self.get_tokenizer() UpperCamelCase__ : Dict = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Dict = "lower newer" UpperCamelCase__ : List[str] = self.prepare_image_inputs() UpperCamelCase__ : str = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
196
1
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class __A ( unittest.TestCase ): """simple docstring""" UpperCamelCase__ : Optional[int] =ViTImageProcessor if is_vision_available() else None @property def __lowercase ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[int] =(3, 32, 128) __UpperCamelCase : Optional[Any] =tempfile.mkdtemp() # fmt: off __UpperCamelCase : str =['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] # fmt: on __UpperCamelCase : Optional[Any] =dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) ) __UpperCamelCase : Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowerCamelCase__ ) + '\n' ) __UpperCamelCase : int ={ 'do_normalize': False, 'do_resize': True, 'image_processor_type': 'ViTImageProcessor', 'resample': 3, 'size': {'height': 32, 'width': 128}, } __UpperCamelCase : Optional[Any] =os.path.join(self.tmpdirname , lowerCamelCase__ ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(lowerCamelCase__ , lowerCamelCase__ ) def __lowercase ( self , **lowerCamelCase__ ): """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def __lowercase ( self , **lowerCamelCase__ ): """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def __lowercase ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Any =np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta ) __UpperCamelCase : List[str] =Image.fromarray(np.moveaxis(lowerCamelCase__ , 0 , -1 ) ) return image_input def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Union[str, Any] =self.get_tokenizer() __UpperCamelCase : Union[str, Any] =self.get_image_processor() __UpperCamelCase : Union[str, Any] =MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ ) processor.save_pretrained(self.tmpdirname ) __UpperCamelCase : Dict =MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase__ ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , lowerCamelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCamelCase__ ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[int] =self.get_tokenizer() __UpperCamelCase : List[str] =self.get_image_processor() __UpperCamelCase : int =MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ ) processor.save_pretrained(self.tmpdirname ) __UpperCamelCase : Union[str, Any] =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __UpperCamelCase : str =self.get_image_processor(do_normalize=lowerCamelCase__ , padding_value=1.0 ) __UpperCamelCase : Dict =MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowerCamelCase__ , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , lowerCamelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCamelCase__ ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[Any] =self.get_image_processor() __UpperCamelCase : Optional[int] =self.get_tokenizer() __UpperCamelCase : Dict =MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ ) __UpperCamelCase : Optional[int] =self.prepare_image_inputs() __UpperCamelCase : List[Any] =image_processor(lowerCamelCase__ , return_tensors='np' ) __UpperCamelCase : Dict =processor(images=lowerCamelCase__ , return_tensors='np' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : int =self.get_image_processor() __UpperCamelCase : int =self.get_tokenizer() __UpperCamelCase : str =MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ ) __UpperCamelCase : List[Any] ='test' __UpperCamelCase : Optional[Any] =processor(text=lowerCamelCase__ ) __UpperCamelCase : str =tokenizer(lowerCamelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Dict =self.get_image_processor() __UpperCamelCase : int =self.get_tokenizer() __UpperCamelCase : Optional[int] =MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ ) __UpperCamelCase : Dict ='test' __UpperCamelCase : Any =self.prepare_image_inputs() __UpperCamelCase : List[Any] =processor(text=lowerCamelCase__ , images=lowerCamelCase__ ) self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'labels'] ) # test if it raises when no input is passed with pytest.raises(lowerCamelCase__ ): processor() def __lowercase ( self ): """simple docstring""" __UpperCamelCase : List[str] =self.get_image_processor() __UpperCamelCase : Union[str, Any] =self.get_tokenizer() __UpperCamelCase : List[str] =MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ ) __UpperCamelCase : Union[str, Any] =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] __UpperCamelCase : str =processor.char_decode(lowerCamelCase__ ) __UpperCamelCase : Tuple =tokenizer.batch_decode(lowerCamelCase__ ) __UpperCamelCase : Tuple =[seq.replace(' ' , '' ) for seq in decoded_tok] self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Union[str, Any] =self.get_image_processor() __UpperCamelCase : str =self.get_tokenizer() __UpperCamelCase : str =MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ ) __UpperCamelCase : List[str] =None __UpperCamelCase : int =self.prepare_image_inputs() __UpperCamelCase : List[Any] =processor(text=lowerCamelCase__ , images=lowerCamelCase__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : List[str] =self.get_image_processor() __UpperCamelCase : List[str] =self.get_tokenizer() __UpperCamelCase : Tuple =MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ ) __UpperCamelCase : Optional[int] =torch.randn(1 , 27 , 38 ) __UpperCamelCase : Any =torch.randn(1 , 27 , 50257 ) __UpperCamelCase : Any =torch.randn(1 , 27 , 30522 ) __UpperCamelCase : List[str] =processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'] )
71
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys __lowerCamelCase : Union[str, Any] = '''3''' print('''Python version:''', sys.version) print('''OS platform:''', platform.platform()) print('''OS architecture:''', platform.machine()) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) except ImportError: print('''Torch version:''', None) try: import transformers print('''transformers version:''', transformers.__version__) except ImportError: print('''transformers version:''', None)
219
0
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def lowerCamelCase_ (UpperCamelCase__ : Tuple ): _UpperCAmelCase : Optional[Any] = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def lowerCamelCase_ (UpperCamelCase__ : Any ): _UpperCAmelCase : str = emb.weight.shape _UpperCAmelCase : List[str] = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) _UpperCAmelCase : int = emb.weight.data return lin_layer def lowerCamelCase_ (UpperCamelCase__ : Tuple , UpperCamelCase__ : Any="facebook/mbart-large-en-ro" , UpperCamelCase__ : str=False , UpperCamelCase__ : Optional[Any]=False ): _UpperCAmelCase : int = torch.load(__lowerCAmelCase , map_location='''cpu''' )['''model'''] remove_ignore_keys_(__lowerCAmelCase ) _UpperCAmelCase : int = state_dict['''encoder.embed_tokens.weight'''].shape[0] _UpperCAmelCase : str = MBartConfig.from_pretrained(__lowerCAmelCase , vocab_size=__lowerCAmelCase ) if mbart_aa and finetuned: _UpperCAmelCase : str = '''relu''' _UpperCAmelCase : int = state_dict['''decoder.embed_tokens.weight'''] _UpperCAmelCase : int = MBartForConditionalGeneration(__lowerCAmelCase ) model.model.load_state_dict(__lowerCAmelCase ) if finetuned: _UpperCAmelCase : str = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": _lowerCAmelCase :str = argparse.ArgumentParser() # Required parameters parser.add_argument( 'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.' ) parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--hf_config', default='facebook/mbart-large-cc25', type=str, help='Which huggingface architecture to use: mbart-large', ) parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint') parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint') _lowerCAmelCase :Any = parser.parse_args() _lowerCAmelCase :Dict = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
350
"""simple docstring""" import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class _UpperCAmelCase ( a ,unittest.TestCase ): '''simple docstring''' a__ =TransfoXLTokenizer a__ =False a__ =False def __lowerCAmelCase ( self ) -> List[str]: super().setUp() _UpperCAmelCase : Dict = [ '''<unk>''', '''[CLS]''', '''[SEP]''', '''want''', '''unwanted''', '''wa''', '''un''', '''running''', ''',''', '''low''', '''l''', ] _UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __lowerCAmelCase ( self , **A ) -> Dict: _UpperCAmelCase : Union[str, Any] = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **A ) def __lowerCAmelCase ( self , A ) -> str: _UpperCAmelCase : str = '''<unk> UNwanted , running''' _UpperCAmelCase : Union[str, Any] = '''<unk> unwanted, running''' return input_text, output_text def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : Any = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=A ) _UpperCAmelCase : Union[str, Any] = tokenizer.tokenize('''<unk> UNwanted , running''' ) self.assertListEqual(A , ['''<unk>''', '''unwanted''', ''',''', '''running'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [0, 4, 8, 7] ) def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : str = TransfoXLTokenizer(lower_case=A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _UpperCAmelCase : Tuple = TransfoXLTokenizer(lower_case=A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : Tuple = TransfoXLTokenizer(lower_case=A ) _UpperCAmelCase : Optional[Any] = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?''' _UpperCAmelCase : Optional[Any] = [ '''Hello''', '''(''', '''bracket''', ''')''', '''and''', '''side''', '''@-@''', '''scrolled''', '''[''', '''and''', ''']''', '''Henry''', '''\'s''', '''$''', '''5''', '''@,@''', '''000''', '''with''', '''3''', '''@.@''', '''34''', '''m''', '''.''', '''What''', '''\'s''', '''up''', '''!''', '''?''', ] self.assertListEqual(tokenizer.tokenize(A ) , A ) self.assertEqual(tokenizer.convert_tokens_to_string(A ) , A ) def __lowerCAmelCase ( self ) -> Optional[int]: _UpperCAmelCase : str = self.get_tokenizer() _UpperCAmelCase : List[Any] = len(A ) tokenizer.add_tokens(['''new1''', '''new2'''] ) tokenizer.move_added_token('''new1''' , 1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(A ) , original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode('''new1''' ) , [1] ) self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
68
0
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = 3_84 _lowerCAmelCase = 7 if "tiny" in model_name: _lowerCAmelCase = 96 _lowerCAmelCase = (2, 2, 6, 2) _lowerCAmelCase = (3, 6, 12, 24) elif "small" in model_name: _lowerCAmelCase = 96 _lowerCAmelCase = (2, 2, 18, 2) _lowerCAmelCase = (3, 6, 12, 24) elif "base" in model_name: _lowerCAmelCase = 1_28 _lowerCAmelCase = (2, 2, 18, 2) _lowerCAmelCase = (4, 8, 16, 32) _lowerCAmelCase = 12 _lowerCAmelCase = 5_12 elif "large" in model_name: _lowerCAmelCase = 1_92 _lowerCAmelCase = (2, 2, 18, 2) _lowerCAmelCase = (6, 12, 24, 48) _lowerCAmelCase = 12 _lowerCAmelCase = 7_68 # set label information _lowerCAmelCase = 1_50 _lowerCAmelCase = """huggingface/label-files""" _lowerCAmelCase = """ade20k-id2label.json""" _lowerCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) _lowerCAmelCase = {int(lowerCAmelCase ): v for k, v in idalabel.items()} _lowerCAmelCase = {v: k for k, v in idalabel.items()} _lowerCAmelCase = SwinConfig( embed_dim=lowerCAmelCase , depths=lowerCAmelCase , num_heads=lowerCAmelCase , window_size=lowerCAmelCase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) _lowerCAmelCase = UperNetConfig( backbone_config=lowerCAmelCase , auxiliary_in_channels=lowerCAmelCase , num_labels=lowerCAmelCase , idalabel=lowerCAmelCase , labelaid=lowerCAmelCase , ) return config def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") ) if i < 3: rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight") ) rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight") ) rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias") ) rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") ) rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = dct.pop(lowerCAmelCase ) _lowerCAmelCase = val def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _lowerCAmelCase = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) _lowerCAmelCase = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight" ) _lowerCAmelCase = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict _lowerCAmelCase = in_proj_weight[:dim, :] _lowerCAmelCase = in_proj_bias[: dim] _lowerCAmelCase = in_proj_weight[ dim : dim * 2, : ] _lowerCAmelCase = in_proj_bias[ dim : dim * 2 ] _lowerCAmelCase = in_proj_weight[ -dim :, : ] _lowerCAmelCase = in_proj_bias[-dim :] # fmt: on def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = x.shape _lowerCAmelCase = x.reshape(lowerCAmelCase , 4 , in_channel // 4 ) _lowerCAmelCase = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCAmelCase , lowerCAmelCase ) return x def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = x.shape _lowerCAmelCase = x.reshape(lowerCAmelCase , in_channel // 4 , 4 ) _lowerCAmelCase = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCAmelCase , lowerCAmelCase ) return x def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = x.shape[0] _lowerCAmelCase = x.reshape(4 , in_channel // 4 ) _lowerCAmelCase = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCAmelCase ) return x def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = x.shape[0] _lowerCAmelCase = x.reshape(in_channel // 4 , 4 ) _lowerCAmelCase = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCAmelCase ) return x def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = { """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""", """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""", """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""", """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""", } _lowerCAmelCase = model_name_to_url[model_name] _lowerCAmelCase = torch.hub.load_state_dict_from_url(lowerCAmelCase , map_location="""cpu""" , file_name=lowerCAmelCase )[ """state_dict""" ] for name, param in state_dict.items(): print(lowerCAmelCase , param.shape ) _lowerCAmelCase = get_upernet_config(lowerCAmelCase ) _lowerCAmelCase = UperNetForSemanticSegmentation(lowerCAmelCase ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): _lowerCAmelCase = state_dict.pop(lowerCAmelCase ) if "bn" in key: _lowerCAmelCase = key.replace("""bn""" , """batch_norm""" ) _lowerCAmelCase = val # rename keys _lowerCAmelCase = create_rename_keys(lowerCAmelCase ) for src, dest in rename_keys: rename_key(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) read_in_q_k_v(lowerCAmelCase , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: _lowerCAmelCase = reverse_correct_unfold_reduction_order(lowerCAmelCase ) if "norm" in key: _lowerCAmelCase = reverse_correct_unfold_norm_order(lowerCAmelCase ) model.load_state_dict(lowerCAmelCase ) # verify on image _lowerCAmelCase = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" _lowerCAmelCase = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw ).convert("""RGB""" ) _lowerCAmelCase = SegformerImageProcessor() _lowerCAmelCase = processor(lowerCAmelCase , return_tensors="""pt""" ).pixel_values with torch.no_grad(): _lowerCAmelCase = model(lowerCAmelCase ) _lowerCAmelCase = outputs.logits print(logits.shape ) print("""First values of logits:""" , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": _lowerCAmelCase = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ) elif model_name == "upernet-swin-small": _lowerCAmelCase = torch.tensor( [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] ) elif model_name == "upernet-swin-base": _lowerCAmelCase = torch.tensor( [[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] ) elif model_name == "upernet-swin-large": _lowerCAmelCase = torch.tensor( [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] ) print("""Logits:""" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCAmelCase , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(lowerCAmelCase ) print(f"Saving processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(lowerCAmelCase ) if push_to_hub: print(f"Pushing model and processor for {model_name} to hub" ) model.push_to_hub(f"openmmlab/{model_name}" ) processor.push_to_hub(f"openmmlab/{model_name}" ) if __name__ == "__main__": A__ : Tuple =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''upernet-swin-tiny''', type=str, choices=[F"""upernet-swin-{size}""" for size in ['''tiny''', '''small''', '''base''', '''large''']], help='''Name of the Swin + UperNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) A__ : Tuple =parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
70
'''simple docstring''' def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = len(lowerCAmelCase ) for i in range(length - 1 ): _lowerCAmelCase = i for k in range(i + 1 , lowerCAmelCase ): if collection[k] < collection[least]: _lowerCAmelCase = k if least != i: _lowerCAmelCase , _lowerCAmelCase = (collection[i], collection[least]) return collection if __name__ == "__main__": A__ : str =input('''Enter numbers separated by a comma:\n''').strip() A__ : Optional[int] =[int(item) for item in user_input.split(''',''')] print(selection_sort(unsorted))
70
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import KarrasVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): __lowerCAmelCase : UNetaDModel __lowerCAmelCase : KarrasVeScheduler def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: '''simple docstring''' super().__init__() self.register_modules(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE ) @torch.no_grad() def __call__( self , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , **_SCREAMING_SNAKE_CASE , ) -> Union[Tuple, ImagePipelineOutput]: '''simple docstring''' UpperCAmelCase : Optional[Any] = self.unet.config.sample_size UpperCAmelCase : Optional[int] = (batch_size, 3, img_size, img_size) UpperCAmelCase : str = self.unet # sample x_0 ~ N(0, sigma_0^2 * I) UpperCAmelCase : List[Any] = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=self.device ) * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE ) for t in self.progress_bar(self.scheduler.timesteps ): # here sigma_t == t_i from the paper UpperCAmelCase : Any = self.scheduler.schedule[t] UpperCAmelCase : str = self.scheduler.schedule[t - 1] if t > 0 else 0 # 1. Select temporarily increased noise level sigma_hat # 2. Add new noise to move from sample_i to sample_hat UpperCAmelCase , UpperCAmelCase : List[Any] = self.scheduler.add_noise_to_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ) # 3. Predict the noise residual given the noise magnitude `sigma_hat` # The model inputs and output are adjusted by following eq. (213) in [1]. UpperCAmelCase : Dict = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample # 4. Evaluate dx/dt at sigma_hat # 5. Take Euler step from sigma to sigma_prev UpperCAmelCase : Dict = self.scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if sigma_prev != 0: # 6. Apply 2nd order correction # The model inputs and output are adjusted by following eq. (213) in [1]. UpperCAmelCase : Union[str, Any] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample UpperCAmelCase : Tuple = self.scheduler.step_correct( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , step_output.prev_sample , step_output["""derivative"""] , ) UpperCAmelCase : Dict = step_output.prev_sample UpperCAmelCase : str = (sample / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase : Tuple = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCAmelCase : List[Any] = self.numpy_to_pil(_SCREAMING_SNAKE_CASE ) if not return_dict: return (image,) return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE )
76
"""simple docstring""" from __future__ import annotations from decimal import Decimal from numpy import array def _snake_case ( UpperCamelCase : list[list[float]] ): UpperCAmelCase : int = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(UpperCamelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix UpperCAmelCase : Union[str, Any] = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError("""This matrix has no inverse.""" ) # Creates a copy of the matrix with swapped positions of the elements UpperCAmelCase : Dict = [[0.0, 0.0], [0.0, 0.0]] UpperCAmelCase , UpperCAmelCase : Dict = matrix[1][1], matrix[0][0] UpperCAmelCase , UpperCAmelCase : Optional[Any] = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(UpperCamelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(UpperCamelCase ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule UpperCAmelCase : Optional[int] = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError("""This matrix has no inverse.""" ) # Creating cofactor matrix UpperCAmelCase : List[Any] = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] UpperCAmelCase : Dict = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) UpperCAmelCase : List[Any] = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) ) UpperCAmelCase : int = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) UpperCAmelCase : Dict = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) UpperCAmelCase : Optional[int] = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) UpperCAmelCase : Optional[Any] = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) UpperCAmelCase : Optional[Any] = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) UpperCAmelCase : str = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) UpperCAmelCase : Optional[Any] = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) UpperCAmelCase : Any = array(UpperCamelCase ) for i in range(3 ): for j in range(3 ): UpperCAmelCase : Optional[int] = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix UpperCAmelCase : int = array(UpperCamelCase ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(UpperCamelCase ) # Calculate the inverse of the matrix return [[float(d(UpperCamelCase ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
76
1
import argparse import json from tqdm import tqdm def lowerCamelCase__ ( ) -> Tuple: __snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--src_path''' , type=snake_case_ , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , ) parser.add_argument( '''--evaluation_set''' , type=snake_case_ , help='''where to store parsed evaluation_set file''' , ) parser.add_argument( '''--gold_data_path''' , type=snake_case_ , help='''where to store parsed gold_data_path file''' , ) __snake_case = parser.parse_args() with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open( args.gold_data_path , '''w''' ) as gold_file: __snake_case = json.load(snake_case_ ) for dpr_record in tqdm(snake_case_ ): __snake_case = dpr_record['''question'''] __snake_case = [context['''title'''] for context in dpr_record['''positive_ctxs''']] eval_file.write(question + '''\n''' ) gold_file.write('''\t'''.join(snake_case_ ) + '''\n''' ) if __name__ == "__main__": main()
24
"""simple docstring""" import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' super().__init__() self.register_modules( vae=a_ , text_encoder=a_ , tokenizer=a_ , unet=a_ , scheduler=a_ , safety_checker=a_ , feature_extractor=a_ , ) def SCREAMING_SNAKE_CASE (self , a_ = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __snake_case : Optional[int] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.enable_attention_slicing(a_ ) @torch.no_grad() def __call__(self , a_ , a_ = 5_12 , a_ = 5_12 , a_ = 50 , a_ = 7.5 , a_ = None , a_ = 1 , a_ = 0.0 , a_ = None , a_ = None , a_ = "pil" , a_ = True , a_ = None , a_ = 1 , a_ = None , **a_ , ): '''simple docstring''' if isinstance(a_ , a_ ): __snake_case : Any = 1 elif isinstance(a_ , a_ ): __snake_case : Any = len(a_ ) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(a_ )}""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(a_ , a_ ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(a_ )}.""" ) # get prompt text embeddings __snake_case : int = self.tokenizer( a_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , ) __snake_case : int = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: __snake_case : List[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( '''The following part of your input was truncated because CLIP can only handle sequences up to''' f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) __snake_case : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: __snake_case : Optional[int] = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method __snake_case , __snake_case , __snake_case : Union[str, Any] = text_embeddings.shape __snake_case : Optional[int] = text_embeddings.repeat(1 , a_ , 1 ) __snake_case : Dict = text_embeddings.view(bs_embed * num_images_per_prompt , a_ , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. __snake_case : Dict = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __snake_case : List[str] if negative_prompt is None: __snake_case : List[Any] = [''''''] elif type(a_ ) is not type(a_ ): raise TypeError( f"""`negative_prompt` should be the same type to `prompt`, but got {type(a_ )} !=""" f""" {type(a_ )}.""" ) elif isinstance(a_ , a_ ): __snake_case : List[str] = [negative_prompt] elif batch_size != len(a_ ): raise ValueError( f"""`negative_prompt`: {negative_prompt} has batch size {len(a_ )}, but `prompt`:""" f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches""" ''' the batch size of `prompt`.''' ) else: __snake_case : Optional[int] = negative_prompt __snake_case : Optional[int] = text_input_ids.shape[-1] __snake_case : List[Any] = self.tokenizer( a_ , padding='''max_length''' , max_length=a_ , truncation=a_ , return_tensors='''pt''' , ) __snake_case : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __snake_case : str = uncond_embeddings.shape[1] __snake_case : int = uncond_embeddings.repeat(a_ , a_ , 1 ) __snake_case : int = uncond_embeddings.view(batch_size * num_images_per_prompt , a_ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __snake_case : Any = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. __snake_case : Any = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) __snake_case : Dict = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) __snake_case : Dict = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps __snake_case : Union[str, Any] = torch.randn( a_ , generator=a_ , device='''cpu''' , dtype=a_ ).to(self.device ) __snake_case : Tuple = torch.randn(a_ , generator=a_ , device='''cpu''' , dtype=a_ ).to( self.device ) else: __snake_case : Dict = torch.randn( a_ , generator=a_ , device=self.device , dtype=a_ ) __snake_case : Dict = torch.randn(a_ , generator=a_ , device=self.device , dtype=a_ ) else: if latents_reference.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) __snake_case : Union[str, Any] = latents_reference.to(self.device ) __snake_case : Dict = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images __snake_case : int = (latents_shape[3] - latents_shape_reference[3]) // 2 __snake_case : Tuple = (latents_shape[2] - latents_shape_reference[2]) // 2 __snake_case : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx __snake_case : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy __snake_case : int = 0 if dx < 0 else dx __snake_case : Union[str, Any] = 0 if dy < 0 else dy __snake_case : str = max(-dx , 0 ) __snake_case : Tuple = max(-dy , 0 ) # import pdb # pdb.set_trace() __snake_case : Any = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(a_ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand __snake_case : Optional[Any] = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler __snake_case : List[Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __snake_case : Any = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __snake_case : Tuple = {} if accepts_eta: __snake_case : List[str] = eta for i, t in enumerate(self.progress_bar(a_ ) ): # expand the latents if we are doing classifier free guidance __snake_case : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __snake_case : Tuple = self.scheduler.scale_model_input(a_ , a_ ) # predict the noise residual __snake_case : int = self.unet(a_ , a_ , encoder_hidden_states=a_ ).sample # perform guidance if do_classifier_free_guidance: __snake_case , __snake_case : Tuple = noise_pred.chunk(2 ) __snake_case : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 __snake_case : Optional[Any] = self.scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(a_ , a_ , a_ ) __snake_case : Union[str, Any] = 1 / 0.1_8215 * latents __snake_case : Optional[Any] = self.vae.decode(a_ ).sample __snake_case : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __snake_case : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: __snake_case : Optional[int] = self.feature_extractor(self.numpy_to_pil(a_ ) , return_tensors='''pt''' ).to( self.device ) __snake_case , __snake_case : List[Any] = self.safety_checker( images=a_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: __snake_case : Union[str, Any] = None if output_type == "pil": __snake_case : Union[str, Any] = self.numpy_to_pil(a_ ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=a_ , nsfw_content_detected=a_ )
102
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCamelCase_ = {"processing_layoutxlm": ["LayoutXLMProcessor"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["LayoutXLMTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["LayoutXLMTokenizerFast"] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
344
import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class a_ ( _snake_case ): UpperCamelCase__ : List[Any] =(PNDMScheduler,) UpperCamelCase__ : Optional[Any] =(("num_inference_steps", 50),) def __a ( self :Union[str, Any] , **_lowercase :Any) -> Union[str, Any]: UpperCAmelCase_ = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**_lowercase) return config def __a ( self :str , _lowercase :List[Any]=0 , **_lowercase :str) -> Union[str, Any]: UpperCAmelCase_ = dict(self.forward_default_kwargs) UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase) UpperCAmelCase_ = self.dummy_sample UpperCAmelCase_ = 0.1 * sample UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ = self.get_scheduler_config(**_lowercase) UpperCAmelCase_ = scheduler_class(**_lowercase) scheduler.set_timesteps(_lowercase) # copy over dummy past residuals UpperCAmelCase_ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowercase) UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase) new_scheduler.set_timesteps(_lowercase) # copy over dummy past residuals UpperCAmelCase_ = dummy_past_residuals[:] UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical" def __a ( self :Any) -> Optional[Any]: pass def __a ( self :str , _lowercase :int=0 , **_lowercase :Union[str, Any]) -> List[Any]: UpperCAmelCase_ = dict(self.forward_default_kwargs) UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase) UpperCAmelCase_ = self.dummy_sample UpperCAmelCase_ = 0.1 * sample UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ = self.get_scheduler_config() UpperCAmelCase_ = scheduler_class(**_lowercase) scheduler.set_timesteps(_lowercase) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase_ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowercase) UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase) # copy over dummy past residuals new_scheduler.set_timesteps(_lowercase) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase_ = dummy_past_residuals[:] UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical" def __a ( self :int , **_lowercase :str) -> Optional[Any]: UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(**_lowercase) UpperCAmelCase_ = scheduler_class(**_lowercase) UpperCAmelCase_ = 10 UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter scheduler.set_timesteps(_lowercase) for i, t in enumerate(scheduler.prk_timesteps): UpperCAmelCase_ = model(_lowercase , _lowercase) UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample for i, t in enumerate(scheduler.plms_timesteps): UpperCAmelCase_ = model(_lowercase , _lowercase) UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase).prev_sample return sample def __a ( self :Union[str, Any]) -> int: UpperCAmelCase_ = dict(self.forward_default_kwargs) UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase) for scheduler_class in self.scheduler_classes: UpperCAmelCase_ = self.get_scheduler_config() UpperCAmelCase_ = scheduler_class(**_lowercase) UpperCAmelCase_ = self.dummy_sample UpperCAmelCase_ = 0.1 * sample if num_inference_steps is not None and hasattr(_lowercase , '''set_timesteps'''): scheduler.set_timesteps(_lowercase) elif num_inference_steps is not None and not hasattr(_lowercase , '''set_timesteps'''): UpperCAmelCase_ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] UpperCAmelCase_ = dummy_past_residuals[:] UpperCAmelCase_ = scheduler.step_prk(_lowercase , 0 , _lowercase , **_lowercase).prev_sample UpperCAmelCase_ = scheduler.step_prk(_lowercase , 1 , _lowercase , **_lowercase).prev_sample self.assertEqual(output_a.shape , sample.shape) self.assertEqual(output_a.shape , output_a.shape) UpperCAmelCase_ = scheduler.step_plms(_lowercase , 0 , _lowercase , **_lowercase).prev_sample UpperCAmelCase_ = scheduler.step_plms(_lowercase , 1 , _lowercase , **_lowercase).prev_sample self.assertEqual(output_a.shape , sample.shape) self.assertEqual(output_a.shape , output_a.shape) def __a ( self :Any) -> Dict: for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=_lowercase) def __a ( self :List[Any]) -> Any: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_lowercase) UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(steps_offset=1) UpperCAmelCase_ = scheduler_class(**_lowercase) scheduler.set_timesteps(10) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , ) def __a ( self :Optional[int]) -> str: for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02]): self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase) def __a ( self :Any) -> List[str]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_lowercase) def __a ( self :List[Any]) -> Dict: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowercase) def __a ( self :Any) -> Tuple: for t in [1, 5, 10]: self.check_over_forward(time_step=_lowercase) def __a ( self :Tuple) -> Dict: for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]): self.check_over_forward(num_inference_steps=_lowercase) def __a ( self :str) -> List[Any]: # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 UpperCAmelCase_ = 27 for scheduler_class in self.scheduler_classes: UpperCAmelCase_ = self.dummy_sample UpperCAmelCase_ = 0.1 * sample UpperCAmelCase_ = self.get_scheduler_config() UpperCAmelCase_ = scheduler_class(**_lowercase) scheduler.set_timesteps(_lowercase) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2]): UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample def __a ( self :List[str]) -> int: with self.assertRaises(_lowercase): UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config() UpperCAmelCase_ = scheduler_class(**_lowercase) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample def __a ( self :List[str]) -> Dict: UpperCAmelCase_ = self.full_loop() UpperCAmelCase_ = torch.sum(torch.abs(_lowercase)) UpperCAmelCase_ = torch.mean(torch.abs(_lowercase)) assert abs(result_sum.item() - 198.1_318) < 1E-2 assert abs(result_mean.item() - 0.2_580) < 1E-3 def __a ( self :Any) -> Tuple: UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''') UpperCAmelCase_ = torch.sum(torch.abs(_lowercase)) UpperCAmelCase_ = torch.mean(torch.abs(_lowercase)) assert abs(result_sum.item() - 67.3_986) < 1E-2 assert abs(result_mean.item() - 0.0_878) < 1E-3 def __a ( self :int) -> Any: # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01) UpperCAmelCase_ = torch.sum(torch.abs(_lowercase)) UpperCAmelCase_ = torch.mean(torch.abs(_lowercase)) assert abs(result_sum.item() - 230.0_399) < 1E-2 assert abs(result_mean.item() - 0.2_995) < 1E-3 def __a ( self :Any) -> Dict: # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01) UpperCAmelCase_ = torch.sum(torch.abs(_lowercase)) UpperCAmelCase_ = torch.mean(torch.abs(_lowercase)) assert abs(result_sum.item() - 186.9_482) < 1E-2 assert abs(result_mean.item() - 0.2_434) < 1E-3
344
1
def snake_case_ ( snake_case=2_81_23 ) -> List[str]: lowercase__: List[str] = [1] * (limit + 1) for i in range(2 , int(limit**0.5 ) + 1 ): sum_divs[i * i] += i for k in range(i + 1 , limit // i + 1 ): sum_divs[k * i] += k + i lowercase__: str = set() lowercase__: List[Any] = 0 for n in range(1 , limit + 1 ): if sum_divs[n] > n: abundants.add(snake_case ) if not any((n - a in abundants) for a in abundants ): res += n return res if __name__ == "__main__": print(solution())
196
__lowerCAmelCase = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } def snake_case_ ( snake_case , snake_case , snake_case ) -> list[str]: lowercase__: int = set() # keep track of all the paths to be checked lowercase__: Optional[int] = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue lowercase__: List[Any] = queue.pop(0 ) # get the last node from the path lowercase__: Optional[int] = path[-1] if node not in explored: lowercase__: Optional[Any] = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: lowercase__: Tuple = list(snake_case ) new_path.append(snake_case ) queue.append(snake_case ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(snake_case ) # in case there's no path between the 2 nodes return [] def snake_case_ ( snake_case , snake_case , snake_case ) -> int: if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 lowercase__: Tuple = [start] lowercase__: List[Any] = set(snake_case ) # Keep tab on distances from `start` node. lowercase__: Tuple = {start: 0, target: -1} while queue: lowercase__: Dict = queue.pop(0 ) if node == target: lowercase__: str = ( dist[node] if dist[target] == -1 else min(dist[target] , dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(snake_case ) queue.append(snake_case ) lowercase__: List[str] = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
196
1
import os from distutils.util import strtobool def UpperCamelCase ( __lowercase : Optional[Any] ,__lowercase : List[Any] ): '''simple docstring''' for e in env_keys: A_ : Any = int(os.environ.get(__lowercase ,-1 ) ) if val >= 0: return val return default def UpperCamelCase ( __lowercase : Dict ,__lowercase : List[Any]=False ): '''simple docstring''' A_ : Tuple = os.environ.get(__lowercase ,str(__lowercase ) ) return strtobool(__lowercase ) == 1 # As its name indicates `strtobool` actually returns an int... def UpperCamelCase ( __lowercase : Dict ,__lowercase : Tuple="no" ): '''simple docstring''' A_ : List[Any] = os.environ.get(__lowercase ,str(__lowercase ) ) return value
192
import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = { """tensor(bool)""": np.bool_, """tensor(int8)""": np.inta, """tensor(uint8)""": np.uinta, """tensor(int16)""": np.intaa, """tensor(uint16)""": np.uintaa, """tensor(int32)""": np.intaa, """tensor(uint32)""": np.uintaa, """tensor(int64)""": np.intaa, """tensor(uint64)""": np.uintaa, """tensor(float16)""": np.floataa, """tensor(float)""": np.floataa, """tensor(double)""": np.floataa, } class UpperCAmelCase : '''simple docstring''' def __init__( self , lowercase=None , **lowercase ): """simple docstring""" logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' ) A_ : List[Any] = model A_ : Dict = kwargs.get('model_save_dir' , lowercase ) A_ : List[str] = kwargs.get('latest_model_name' , lowercase ) def __call__( self , **lowercase ): """simple docstring""" A_ : str = {k: np.array(lowercase ) for k, v in kwargs.items()} return self.model.run(lowercase , lowercase ) @staticmethod def lowerCAmelCase_ ( lowercase , lowercase=None , lowercase=None ): """simple docstring""" if provider is None: logger.info('No onnxruntime provider specified, using CPUExecutionProvider' ) A_ : List[Any] = 'CPUExecutionProvider' return ort.InferenceSession(lowercase , providers=[provider] , sess_options=lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase = None , **lowercase ): """simple docstring""" A_ : str = file_name if file_name is not None else ONNX_WEIGHTS_NAME A_ : Optional[int] = self.model_save_dir.joinpath(self.latest_model_name ) A_ : int = Path(lowercase ).joinpath(lowercase ) try: shutil.copyfile(lowercase , lowercase ) except shutil.SameFileError: pass # copy external weights (for models >2GB) A_ : Optional[Any] = self.model_save_dir.joinpath(lowercase ) if src_path.exists(): A_ : int = Path(lowercase ).joinpath(lowercase ) try: shutil.copyfile(lowercase , lowercase ) except shutil.SameFileError: pass def lowerCAmelCase_ ( self , lowercase , **lowercase , ): """simple docstring""" if os.path.isfile(lowercase ): logger.error(F'''Provided path ({save_directory}) should be a directory, not a file''' ) return os.makedirs(lowercase , exist_ok=lowercase ) # saving model weights/files self._save_pretrained(lowercase , **lowercase ) @classmethod def lowerCAmelCase_ ( cls , lowercase , lowercase = None , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = None , lowercase = None , **lowercase , ): """simple docstring""" A_ : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(lowercase ): A_ : Optional[int] = OnnxRuntimeModel.load_model( os.path.join(lowercase , lowercase ) , provider=lowercase , sess_options=lowercase ) A_ : Dict = Path(lowercase ) # load model from hub else: # download model A_ : List[str] = hf_hub_download( repo_id=lowercase , filename=lowercase , use_auth_token=lowercase , revision=lowercase , cache_dir=lowercase , force_download=lowercase , ) A_ : int = Path(lowercase ).parent A_ : Optional[Any] = Path(lowercase ).name A_ : Any = OnnxRuntimeModel.load_model(lowercase , provider=lowercase , sess_options=lowercase ) return cls(model=lowercase , **lowercase ) @classmethod def lowerCAmelCase_ ( cls , lowercase , lowercase = True , lowercase = None , lowercase = None , **lowercase , ): """simple docstring""" A_ : List[Any] = None if len(str(lowercase ).split('@' ) ) == 2: A_ , A_ : int = model_id.split('@' ) return cls._from_pretrained( model_id=lowercase , revision=lowercase , cache_dir=lowercase , force_download=lowercase , use_auth_token=lowercase , **lowercase , )
192
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class __UpperCAmelCase ( metaclass=_lowerCamelCase ): __lowercase = ["""torch""", """transformers""", """onnx"""] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def lowerCamelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def lowerCamelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class __UpperCAmelCase ( metaclass=_lowerCamelCase ): __lowercase = ["""torch""", """transformers""", """onnx"""] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def lowerCamelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def lowerCamelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class __UpperCAmelCase ( metaclass=_lowerCamelCase ): __lowercase = ["""torch""", """transformers""", """onnx"""] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def lowerCamelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def lowerCamelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class __UpperCAmelCase ( metaclass=_lowerCamelCase ): __lowercase = ["""torch""", """transformers""", """onnx"""] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def lowerCamelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def lowerCamelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class __UpperCAmelCase ( metaclass=_lowerCamelCase ): __lowercase = ["""torch""", """transformers""", """onnx"""] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def lowerCamelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def lowerCamelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class __UpperCAmelCase ( metaclass=_lowerCamelCase ): __lowercase = ["""torch""", """transformers""", """onnx"""] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def lowerCamelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def lowerCamelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" requires_backends(cls , ['torch', 'transformers', 'onnx'] )
42
import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask lowerCAmelCase__ = logging.getLogger(__name__) class a__ ( snake_case ): """simple docstring""" def __init__( self , lowercase=-1 ) -> Optional[Any]: '''simple docstring''' A__ = label_idx def UpperCamelCase ( self , lowercase , lowercase ) -> List[InputExample]: '''simple docstring''' if isinstance(lowercase , lowercase ): A__ = mode.value A__ = os.path.join(lowercase , F'{mode}.txt' ) A__ = 1 A__ = [] with open(lowercase , encoding="utf-8" ) as f: A__ = [] A__ = [] for line in f: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase , labels=lowercase ) ) guid_index += 1 A__ = [] A__ = [] else: A__ = line.split(" " ) words.append(splits[0] ) if len(lowercase ) > 1: labels.append(splits[self.label_idx].replace("\n" , "" ) ) else: # Examples could have no label for mode = "test" labels.append("O" ) if words: examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase , labels=lowercase ) ) return examples def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> Optional[Any]: '''simple docstring''' A__ = 0 for line in test_input_reader: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": writer.write(lowercase ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: A__ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n" writer.write(lowercase ) else: logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] ) def UpperCamelCase ( self , lowercase ) -> List[str]: '''simple docstring''' if path: with open(lowercase , "r" ) as f: A__ = f.read().splitlines() if "O" not in labels: A__ = ["O"] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class a__ ( snake_case ): """simple docstring""" def __init__( self ) -> Union[str, Any]: '''simple docstring''' super().__init__(label_idx=-2 ) def UpperCamelCase ( self , lowercase ) -> List[str]: '''simple docstring''' if path: with open(lowercase , "r" ) as f: A__ = f.read().splitlines() if "O" not in labels: A__ = ["O"] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class a__ ( snake_case ): """simple docstring""" def UpperCamelCase ( self , lowercase , lowercase ) -> List[InputExample]: '''simple docstring''' if isinstance(lowercase , lowercase ): A__ = mode.value A__ = os.path.join(lowercase , F'{mode}.txt' ) A__ = 1 A__ = [] with open(lowercase , encoding="utf-8" ) as f: for sentence in parse_incr(lowercase ): A__ = [] A__ = [] for token in sentence: words.append(token["form"] ) labels.append(token["upos"] ) assert len(lowercase ) == len(lowercase ) if words: examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase , labels=lowercase ) ) guid_index += 1 return examples def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> List[Any]: '''simple docstring''' A__ = 0 for sentence in parse_incr(lowercase ): A__ = preds_list[example_id] A__ = "" for token in sentence: out += F'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) ' out += "\n" writer.write(lowercase ) example_id += 1 def UpperCamelCase ( self , lowercase ) -> List[str]: '''simple docstring''' if path: with open(lowercase , "r" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
68
0
'''simple docstring''' import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def _lowerCamelCase ( lowercase : Tuple , lowercase : Dict , lowercase : Dict ) -> List[Any]: _a = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") _a = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(lowercase ): os.makedirs(lowercase ) _a = model.state_dict() def to_tf_var_name(lowercase : Union[str, Any] ): for patt, repl in iter(lowercase ): _a = name.replace(lowercase , lowercase ) return F'bert/{name}' def create_tf_var(lowercase : str , lowercase : List[Any] , lowercase : Optional[int] ): _a = tf.dtypes.as_dtype(tensor.dtype ) _a = tf.get_variable(dtype=lowercase , shape=tensor.shape , name=lowercase , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(lowercase ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: _a = to_tf_var_name(lowercase ) _a = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): _a = torch_tensor.T _a = create_tf_var(tensor=lowercase , name=lowercase , session=lowercase ) tf.keras.backend.set_value(lowercase , lowercase ) _a = session.run(lowercase ) print(F'Successfully created {tf_name}: {np.allclose(lowercase , lowercase )}' ) _a = tf.train.Saver(tf.trainable_variables() ) saver.save(lowercase , os.path.join(lowercase , model_name.replace("-" , "_" ) + ".ckpt" ) ) def _lowerCamelCase ( lowercase : Optional[Any]=None ) -> Optional[int]: _a = argparse.ArgumentParser() parser.add_argument("--model_name" , type=lowercase , required=lowercase , help="model name e.g. bert-base-uncased" ) parser.add_argument( "--cache_dir" , type=lowercase , default=lowercase , required=lowercase , help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path" , type=lowercase , required=lowercase , help="/path/to/<pytorch-model-name>.bin" ) parser.add_argument("--tf_cache_dir" , type=lowercase , required=lowercase , help="Directory in which to save tensorflow model" ) _a = parser.parse_args(lowercase ) _a = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=lowercase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
359
'''simple docstring''' import re from filelock import FileLock try: import nltk lowerCAmelCase_ : Optional[int] = True except (ImportError, ModuleNotFoundError): lowerCAmelCase_ : Tuple = False if NLTK_AVAILABLE: with FileLock('.lock') as lock: nltk.download('punkt', quiet=True) def _lowerCamelCase ( lowercase : str ) -> str: re.sub("<n>" , "" , lowercase ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(lowercase ) )
346
0
from collections.abc import Callable def lowerCamelCase__ ( _a , _a , _a): SCREAMING_SNAKE_CASE : float = a SCREAMING_SNAKE_CASE : float = b if function(_a) == 0: # one of the a or b is a root for the function return a elif function(_a) == 0: return b elif ( function(_a) * function(_a) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError("could not find root in given interval.") else: SCREAMING_SNAKE_CASE : float = start + (end - start) / 2.0 while abs(start - mid) > 10**-7: # until precisely equals to 10^-7 if function(_a) == 0: return mid elif function(_a) * function(_a) < 0: SCREAMING_SNAKE_CASE : List[Any] = mid else: SCREAMING_SNAKE_CASE : List[str] = mid SCREAMING_SNAKE_CASE : Any = start + (end - start) / 2.0 return mid def lowerCamelCase__ ( _a): return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1000)) import doctest doctest.testmod()
76
from transformers import BertTokenizerFast from .custom_tokenization import CustomTokenizer class _UpperCamelCase ( __A ): '''simple docstring''' lowerCamelCase__ =CustomTokenizer pass
76
1
'''simple docstring''' import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Dict=1_3 , UpperCamelCase__ : Any=3_0 , UpperCamelCase__ : Union[str, Any]=2 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Union[str, Any]=3_2 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : List[str]=4 , UpperCamelCase__ : List[str]=3_7 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : str=1_0 , UpperCamelCase__ : Tuple=0.0_2 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Dict=2 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = scope UpperCamelCase = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) UpperCamelCase = (image_size // patch_size) ** 2 UpperCamelCase = num_patches + 2 def A ( self : Any ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = self.get_config() return config, pixel_values, labels def A ( self : Optional[int] ): """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def A ( self : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ): """simple docstring""" UpperCamelCase = DeiTModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ): """simple docstring""" UpperCamelCase = DeiTForMaskedImageModeling(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCamelCase = 1 UpperCamelCase = DeiTForMaskedImageModeling(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase = model(UpperCamelCase__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def A ( self : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] ): """simple docstring""" UpperCamelCase = self.type_sequence_label_size UpperCamelCase = DeiTForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCamelCase = 1 UpperCamelCase = DeiTForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def A ( self : Dict ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE = ( { """feature-extraction""": DeiTModel, """image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = DeiTModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=3_7 ) def A ( self : Union[str, Any] ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds' ) def A ( self : Tuple ): """simple docstring""" pass def A ( self : Tuple ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) ) def A ( self : str ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(UpperCamelCase__ ) UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def A ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def A ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ ) def A ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) def A ( self : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : str=False ): """simple docstring""" UpperCamelCase = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def A ( self : List[str] ): """simple docstring""" if not self.model_tester.is_training: return UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(UpperCamelCase__ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue UpperCamelCase = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.train() UpperCamelCase = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) UpperCamelCase = model(**UpperCamelCase__ ).loss loss.backward() def A ( self : int ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCamelCase = False UpperCamelCase = True for model_class in self.all_model_classes: if model_class in get_values(UpperCamelCase__ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue UpperCamelCase = model_class(UpperCamelCase__ ) model.gradient_checkpointing_enable() model.to(UpperCamelCase__ ) model.train() UpperCamelCase = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) UpperCamelCase = model(**UpperCamelCase__ ).loss loss.backward() def A ( self : List[str] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = [ {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float}, {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long}, {'title': 'regression', 'num_labels': 1, 'dtype': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(UpperCamelCase__ ), *get_values(UpperCamelCase__ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}""" ): UpperCamelCase = problem_type['title'] UpperCamelCase = problem_type['num_labels'] UpperCamelCase = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.train() UpperCamelCase = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) if problem_type["num_labels"] > 1: UpperCamelCase = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] ) UpperCamelCase = inputs['labels'].to(problem_type['dtype'] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=UpperCamelCase__ ) as warning_list: UpperCamelCase = model(**UpperCamelCase__ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def A ( self : int ): """simple docstring""" for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = DeiTModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def __lowerCamelCase ( ) -> Optional[int]: """simple docstring""" UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def A ( self : Optional[int] ): """simple docstring""" return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' ) if is_vision_available() else None ) @slow def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to( UpperCamelCase__ ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): UpperCamelCase = model(**UpperCamelCase__ ) # verify the logits UpperCamelCase = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) UpperCamelCase = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def A ( self : Any ): """simple docstring""" UpperCamelCase = DeiTModel.from_pretrained( 'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ) UpperCamelCase = inputs.pixel_values.to(UpperCamelCase__ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): UpperCamelCase = model(UpperCamelCase__ )
363
'''simple docstring''' import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ ) -> Optional[Any]: """simple docstring""" # Load configuration defined in the metadata file with open(A__ ) as metadata_file: UpperCamelCase = json.load(A__ ) UpperCamelCase = LukeConfig(use_entity_aware_attention=A__ , **metadata['model_config'] ) # Load in the weights from the checkpoint_path UpperCamelCase = torch.load(A__ , map_location='cpu' )['module'] # Load the entity vocab file UpperCamelCase = load_original_entity_vocab(A__ ) # add an entry for [MASK2] UpperCamelCase = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 UpperCamelCase = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] ) # Add special tokens to the token vocabulary for downstream tasks UpperCamelCase = AddedToken('<ent>' , lstrip=A__ , rstrip=A__ ) UpperCamelCase = AddedToken('<ent2>' , lstrip=A__ , rstrip=A__ ) tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" ) tokenizer.save_pretrained(A__ ) with open(os.path.join(A__ , 'tokenizer_config.json' ) , 'r' ) as f: UpperCamelCase = json.load(A__ ) UpperCamelCase = 'MLukeTokenizer' with open(os.path.join(A__ , 'tokenizer_config.json' ) , 'w' ) as f: json.dump(A__ , A__ ) with open(os.path.join(A__ , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f: json.dump(A__ , A__ ) UpperCamelCase = MLukeTokenizer.from_pretrained(A__ ) # Initialize the embeddings of the special tokens UpperCamelCase = tokenizer.convert_tokens_to_ids(['@'] )[0] UpperCamelCase = tokenizer.convert_tokens_to_ids(['#'] )[0] UpperCamelCase = state_dict['embeddings.word_embeddings.weight'] UpperCamelCase = word_emb[ent_init_index].unsqueeze(0 ) UpperCamelCase = word_emb[enta_init_index].unsqueeze(0 ) UpperCamelCase = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: UpperCamelCase = state_dict[bias_name] UpperCamelCase = decoder_bias[ent_init_index].unsqueeze(0 ) UpperCamelCase = decoder_bias[enta_init_index].unsqueeze(0 ) UpperCamelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: UpperCamelCase = F"""encoder.layer.{layer_index}.attention.self.""" UpperCamelCase = state_dict[prefix + matrix_name] UpperCamelCase = state_dict[prefix + matrix_name] UpperCamelCase = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks UpperCamelCase = state_dict['entity_embeddings.entity_embeddings.weight'] UpperCamelCase = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 ) UpperCamelCase = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' UpperCamelCase = state_dict['entity_predictions.bias'] UpperCamelCase = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 ) UpperCamelCase = torch.cat([entity_prediction_bias, entity_mask_bias] ) UpperCamelCase = LukeForMaskedLM(config=A__ ).eval() state_dict.pop('entity_predictions.decoder.weight' ) state_dict.pop('lm_head.decoder.weight' ) state_dict.pop('lm_head.decoder.bias' ) UpperCamelCase = OrderedDict() for key, value in state_dict.items(): if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )): UpperCamelCase = state_dict[key] else: UpperCamelCase = state_dict[key] UpperCamelCase , UpperCamelCase = model.load_state_dict(A__ , strict=A__ ) if set(A__ ) != {"luke.embeddings.position_ids"}: raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" ) if set(A__ ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs UpperCamelCase = MLukeTokenizer.from_pretrained(A__ , task='entity_classification' ) UpperCamelCase = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).' UpperCamelCase = (0, 9) UpperCamelCase = tokenizer(A__ , entity_spans=[span] , return_tensors='pt' ) UpperCamelCase = model(**A__ ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base UpperCamelCase = torch.Size((1, 33, 768) ) UpperCamelCase = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , A__ , atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base UpperCamelCase = torch.Size((1, 1, 768) ) UpperCamelCase = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is""" F""" {expected_shape}""" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , A__ , atol=1e-4 ): raise ValueError # Verify masked word/entity prediction UpperCamelCase = MLukeTokenizer.from_pretrained(A__ ) UpperCamelCase = 'Tokyo is the capital of <mask>.' UpperCamelCase = (24, 30) UpperCamelCase = tokenizer(A__ , entity_spans=[span] , return_tensors='pt' ) UpperCamelCase = model(**A__ ) UpperCamelCase = encoding['input_ids'][0].tolist() UpperCamelCase = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) ) UpperCamelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(A__ ) UpperCamelCase = outputs.entity_logits[0][0].argmax().item() UpperCamelCase = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print('Saving PyTorch model to {}'.format(A__ ) ) model.save_pretrained(A__ ) def __lowerCamelCase ( A__ ) -> int: """simple docstring""" UpperCamelCase = ['[MASK]', '[PAD]', '[UNK]'] UpperCamelCase = [json.loads(A__ ) for line in open(A__ )] UpperCamelCase = {} for entry in data: UpperCamelCase = entry['id'] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: UpperCamelCase = entity_id break UpperCamelCase = F"""{language}:{entity_name}""" UpperCamelCase = entity_id return new_mapping if __name__ == "__main__": _lowerCamelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) _lowerCamelCase : Optional[Any] = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
249
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCamelCase__ : int = {'processing_layoutxlm': ['LayoutXLMProcessor']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Tuple = ['LayoutXLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : List[Any] = ['LayoutXLMTokenizerFast'] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys UpperCamelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
344
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase__ : Any = logging.get_logger(__name__) UpperCamelCase__ : Optional[int] = { 'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json', 'distilbert-base-uncased-distilled-squad': ( 'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json' ), 'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json', 'distilbert-base-cased-distilled-squad': ( 'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json' ), 'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json', 'distilbert-base-multilingual-cased': ( 'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json' ), 'distilbert-base-uncased-finetuned-sst-2-english': ( 'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json' ), } class _lowerCAmelCase ( __A ): """simple docstring""" lowerCamelCase = '''distilbert''' lowerCamelCase = { '''hidden_size''': '''dim''', '''num_attention_heads''': '''n_heads''', '''num_hidden_layers''': '''n_layers''', } def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=512 , _lowerCamelCase=False , _lowerCamelCase=6 , _lowerCamelCase=12 , _lowerCamelCase=768 , _lowerCamelCase=4 * 768 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase="gelu" , _lowerCamelCase=0.02 , _lowerCamelCase=0.1 , _lowerCamelCase=0.2 , _lowerCamelCase=0 , **_lowerCamelCase , ) -> Optional[Any]: A_ : Tuple = vocab_size A_ : List[Any] = max_position_embeddings A_ : int = sinusoidal_pos_embds A_ : int = n_layers A_ : str = n_heads A_ : Optional[int] = dim A_ : int = hidden_dim A_ : Tuple = dropout A_ : List[Any] = attention_dropout A_ : int = activation A_ : Dict = initializer_range A_ : List[Any] = qa_dropout A_ : int = seq_classif_dropout super().__init__(**_lowerCamelCase , pad_token_id=_lowerCamelCase ) class _lowerCAmelCase ( __A ): """simple docstring""" @property def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": A_ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: A_ : int = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
344
1
import argparse import collections import json import os import re import string import sys import numpy as np _UpperCAmelCase : str = re.compile(R"\b(a|an|the)\b", re.UNICODE) _UpperCAmelCase : List[Any] = None def A ( ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' ) parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' ) parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' ) parser.add_argument( '--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' ) parser.add_argument( '--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' ) parser.add_argument( '--na-prob-thresh' , '-t' , type=lowercase , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , ) parser.add_argument( '--out-image-dir' , '-p' , metavar='out_images' , default=lowercase , help='Save precision-recall curves to directory.' ) parser.add_argument('--verbose' , '-v' , action='store_true' ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def A ( lowercase ) -> str: '''simple docstring''' UpperCamelCase = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: UpperCamelCase = bool(qa['answers']['text'] ) return qid_to_has_ans def A ( lowercase ) -> Union[str, Any]: '''simple docstring''' def remove_articles(lowercase ): return ARTICLES_REGEX.sub(' ' , lowercase ) def white_space_fix(lowercase ): return " ".join(text.split() ) def remove_punc(lowercase ): UpperCamelCase = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowercase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowercase ) ) ) ) def A ( lowercase ) -> int: '''simple docstring''' if not s: return [] return normalize_answer(lowercase ).split() def A ( lowercase , lowercase ) -> Any: '''simple docstring''' return int(normalize_answer(lowercase ) == normalize_answer(lowercase ) ) def A ( lowercase , lowercase ) -> List[Any]: '''simple docstring''' UpperCamelCase = get_tokens(lowercase ) UpperCamelCase = get_tokens(lowercase ) UpperCamelCase = collections.Counter(lowercase ) & collections.Counter(lowercase ) UpperCamelCase = sum(common.values() ) if len(lowercase ) == 0 or len(lowercase ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 UpperCamelCase = 1.0 * num_same / len(lowercase ) UpperCamelCase = 1.0 * num_same / len(lowercase ) UpperCamelCase = (2 * precision * recall) / (precision + recall) return fa def A ( lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' UpperCamelCase = {} UpperCamelCase = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: UpperCamelCase = qa['id'] UpperCamelCase = [t for t in qa['answers']['text'] if normalize_answer(lowercase )] if not gold_answers: # For unanswerable questions, only correct answer is empty string UpperCamelCase = [''] if qid not in preds: print(f'''Missing prediction for {qid}''' ) continue UpperCamelCase = preds[qid] # Take max over all gold answers UpperCamelCase = max(compute_exact(lowercase , lowercase ) for a in gold_answers ) UpperCamelCase = max(compute_fa(lowercase , lowercase ) for a in gold_answers ) return exact_scores, fa_scores def A ( lowercase , lowercase , lowercase , lowercase ) -> str: '''simple docstring''' UpperCamelCase = {} for qid, s in scores.items(): UpperCamelCase = na_probs[qid] > na_prob_thresh if pred_na: UpperCamelCase = float(not qid_to_has_ans[qid] ) else: UpperCamelCase = s return new_scores def A ( lowercase , lowercase , lowercase=None ) -> Dict: '''simple docstring''' if not qid_list: UpperCamelCase = len(lowercase ) return collections.OrderedDict( [ ('exact', 100.0 * sum(exact_scores.values() ) / total), ('f1', 100.0 * sum(fa_scores.values() ) / total), ('total', total), ] ) else: UpperCamelCase = len(lowercase ) return collections.OrderedDict( [ ('exact', 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ('f1', 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ('total', total), ] ) def A ( lowercase , lowercase , lowercase ) -> Tuple: '''simple docstring''' for k in new_eval: UpperCamelCase = new_eval[k] def A ( lowercase , lowercase , lowercase , lowercase ) -> Tuple: '''simple docstring''' plt.step(lowercase , lowercase , color='b' , alpha=0.2 , where='post' ) plt.fill_between(lowercase , lowercase , step='post' , alpha=0.2 , color='b' ) plt.xlabel('Recall' ) plt.ylabel('Precision' ) plt.xlim([0.0, 1.0_5] ) plt.ylim([0.0, 1.0_5] ) plt.title(lowercase ) plt.savefig(lowercase ) plt.clf() def A ( lowercase , lowercase , lowercase , lowercase , lowercase=None , lowercase=None ) -> Optional[int]: '''simple docstring''' UpperCamelCase = sorted(lowercase , key=lambda lowercase : na_probs[k] ) UpperCamelCase = 0.0 UpperCamelCase = 1.0 UpperCamelCase = 0.0 UpperCamelCase = [1.0] UpperCamelCase = [0.0] UpperCamelCase = 0.0 for i, qid in enumerate(lowercase ): if qid_to_has_ans[qid]: true_pos += scores[qid] UpperCamelCase = true_pos / float(i + 1 ) UpperCamelCase = true_pos / float(lowercase ) if i == len(lowercase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(lowercase ) recalls.append(lowercase ) if out_image: plot_pr_curve(lowercase , lowercase , lowercase , lowercase ) return {"ap": 100.0 * avg_prec} def A ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple: '''simple docstring''' if out_image_dir and not os.path.exists(lowercase ): os.makedirs(lowercase ) UpperCamelCase = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return UpperCamelCase = make_precision_recall_eval( lowercase , lowercase , lowercase , lowercase , out_image=os.path.join(lowercase , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , ) UpperCamelCase = make_precision_recall_eval( lowercase , lowercase , lowercase , lowercase , out_image=os.path.join(lowercase , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , ) UpperCamelCase = {k: float(lowercase ) for k, v in qid_to_has_ans.items()} UpperCamelCase = make_precision_recall_eval( lowercase , lowercase , lowercase , lowercase , out_image=os.path.join(lowercase , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , ) merge_eval(lowercase , lowercase , 'pr_exact' ) merge_eval(lowercase , lowercase , 'pr_f1' ) merge_eval(lowercase , lowercase , 'pr_oracle' ) def A ( lowercase , lowercase , lowercase , lowercase ) -> str: '''simple docstring''' if not qid_list: return UpperCamelCase = [na_probs[k] for k in qid_list] UpperCamelCase = np.ones_like(lowercase ) / float(len(lowercase ) ) plt.hist(lowercase , weights=lowercase , bins=20 , range=(0.0, 1.0) ) plt.xlabel('Model probability of no-answer' ) plt.ylabel('Proportion of dataset' ) plt.title(f'''Histogram of no-answer probability: {name}''' ) plt.savefig(os.path.join(lowercase , f'''na_prob_hist_{name}.png''' ) ) plt.clf() def A ( lowercase , lowercase , lowercase , lowercase ) -> List[Any]: '''simple docstring''' UpperCamelCase = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) UpperCamelCase = num_no_ans UpperCamelCase = cur_score UpperCamelCase = 0.0 UpperCamelCase = sorted(lowercase , key=lambda lowercase : na_probs[k] ) for i, qid in enumerate(lowercase ): if qid not in scores: continue if qid_to_has_ans[qid]: UpperCamelCase = scores[qid] else: if preds[qid]: UpperCamelCase = -1 else: UpperCamelCase = 0 cur_score += diff if cur_score > best_score: UpperCamelCase = cur_score UpperCamelCase = na_probs[qid] return 100.0 * best_score / len(lowercase ), best_thresh def A ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]: '''simple docstring''' UpperCamelCase , UpperCamelCase = find_best_thresh(lowercase , lowercase , lowercase , lowercase ) UpperCamelCase , UpperCamelCase = find_best_thresh(lowercase , lowercase , lowercase , lowercase ) UpperCamelCase = best_exact UpperCamelCase = exact_thresh UpperCamelCase = best_fa UpperCamelCase = fa_thresh def A ( ) -> List[str]: '''simple docstring''' with open(OPTS.data_file ) as f: UpperCamelCase = json.load(lowercase ) UpperCamelCase = dataset_json['data'] with open(OPTS.pred_file ) as f: UpperCamelCase = json.load(lowercase ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: UpperCamelCase = json.load(lowercase ) else: UpperCamelCase = {k: 0.0 for k in preds} UpperCamelCase = make_qid_to_has_ans(lowercase ) # maps qid to True/False UpperCamelCase = [k for k, v in qid_to_has_ans.items() if v] UpperCamelCase = [k for k, v in qid_to_has_ans.items() if not v] UpperCamelCase , UpperCamelCase = get_raw_scores(lowercase , lowercase ) UpperCamelCase = apply_no_ans_threshold(lowercase , lowercase , lowercase , OPTS.na_prob_thresh ) UpperCamelCase = apply_no_ans_threshold(lowercase , lowercase , lowercase , OPTS.na_prob_thresh ) UpperCamelCase = make_eval_dict(lowercase , lowercase ) if has_ans_qids: UpperCamelCase = make_eval_dict(lowercase , lowercase , qid_list=lowercase ) merge_eval(lowercase , lowercase , 'HasAns' ) if no_ans_qids: UpperCamelCase = make_eval_dict(lowercase , lowercase , qid_list=lowercase ) merge_eval(lowercase , lowercase , 'NoAns' ) if OPTS.na_prob_file: find_all_best_thresh(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(lowercase , lowercase , lowercase , lowercase , lowercase , OPTS.out_image_dir ) histogram_na_prob(lowercase , lowercase , OPTS.out_image_dir , 'hasAns' ) histogram_na_prob(lowercase , lowercase , OPTS.out_image_dir , 'noAns' ) if OPTS.out_file: with open(OPTS.out_file , 'w' ) as f: json.dump(lowercase , lowercase ) else: print(json.dumps(lowercase , indent=2 ) ) if __name__ == "__main__": _UpperCAmelCase : List[str] = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("Agg") import matplotlib.pyplot as plt main()
360
_UpperCAmelCase : str = [ 999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0, ] _UpperCAmelCase : Any = [ 999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429, 428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0, ] _UpperCAmelCase : List[Any] = [ 999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] _UpperCAmelCase : Tuple = [ 999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956, 951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892, 887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820, 813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733, 724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613, 612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440, 439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88, 44, 0, ] _UpperCAmelCase : Union[str, Any] = [ 999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970, 968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939, 936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905, 902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867, 864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827, 824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784, 780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737, 733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685, 681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627, 622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563, 557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483, 474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352, 351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131, 88, 44, 0, ] _UpperCAmelCase : List[str] = [ 999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899, 874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0, ] _UpperCAmelCase : Tuple = [ 999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0, ] _UpperCAmelCase : Dict = [ 999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955, 951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907, 903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815, 808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688, 677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528, 514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340, 320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66, 33, 0, ]
110
0
import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class _a (unittest.TestCase ): '''simple docstring''' @property def __A ( self ): torch.manual_seed(0 ) A__ : Tuple = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def __A ( self ): A__ : List[Any] = self.dummy_uncond_unet A__ : Dict = PNDMScheduler() A__ : int = PNDMPipeline(unet=A__ , scheduler=A__ ) pndm.to(A__ ) pndm.set_progress_bar_config(disable=A__ ) A__ : Dict = torch.manual_seed(0 ) A__ : List[Any] = pndm(generator=A__ , num_inference_steps=20 , output_type="""numpy""" ).images A__ : List[str] = torch.manual_seed(0 ) A__ : str = pndm(generator=A__ , num_inference_steps=20 , output_type="""numpy""" , return_dict=A__ )[0] A__ : Any = image[0, -3:, -3:, -1] A__ : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) A__ : Dict = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class _a (unittest.TestCase ): '''simple docstring''' def __A ( self ): A__ : int = """google/ddpm-cifar10-32""" A__ : Union[str, Any] = UNetaDModel.from_pretrained(A__ ) A__ : int = PNDMScheduler() A__ : List[Any] = PNDMPipeline(unet=A__ , scheduler=A__ ) pndm.to(A__ ) pndm.set_progress_bar_config(disable=A__ ) A__ : Optional[int] = torch.manual_seed(0 ) A__ : Union[str, Any] = pndm(generator=A__ , output_type="""numpy""" ).images A__ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) A__ : int = np.array([0.1_5_6_4, 0.1_4_6_4_5, 0.1_4_0_6, 0.1_4_7_1_5, 0.1_2_4_2_5, 0.1_4_0_4_5, 0.1_3_1_1_5, 0.1_2_1_7_5, 0.1_2_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
192
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A_ : Optional[int] = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : str = [ 'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'IBertForMaskedLM', 'IBertForMultipleChoice', 'IBertForQuestionAnswering', 'IBertForSequenceClassification', 'IBertForTokenClassification', 'IBertModel', 'IBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys A_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
192
1
from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar __a :List[str] = TypeVar('T') class _a ( Generic[T] ): """simple docstring""" def __init__( self : Any , UpperCAmelCase : T ): A_ = data A_ = None def __str__( self : Union[str, Any] ): return f'''{self.data}''' class _a ( Generic[T] ): """simple docstring""" def __init__( self : Union[str, Any] ): A_ = None def __iter__( self : Dict ): A_ = self.top while node: yield node.data A_ = node.next def __str__( self : Tuple ): return "->".join([str(UpperCAmelCase ) for item in self] ) def __len__( self : Dict ): return len(tuple(iter(self ) ) ) def __A ( self : Optional[int] ): return self.top is None def __A ( self : Dict , UpperCAmelCase : T ): A_ = Node(UpperCAmelCase ) if not self.is_empty(): A_ = self.top A_ = node def __A ( self : Any ): if self.is_empty(): raise IndexError("pop from empty stack" ) assert isinstance(self.top , UpperCAmelCase ) A_ = self.top A_ = self.top.next return pop_node.data def __A ( self : Union[str, Any] ): if self.is_empty(): raise IndexError("peek from empty stack" ) assert self.top is not None return self.top.data def __A ( self : List[str] ): A_ = None if __name__ == "__main__": from doctest import testmod testmod()
329
from ..utils import DummyObject, requires_backends class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Union[str, Any] = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Tuple = ['torch', 'transformers', 'onnx'] def __init__( self : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Any = ['torch', 'transformers', 'onnx'] def __init__( self : Dict , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[int] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : List[str] = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : int ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Any , *UpperCAmelCase : List[Any] , **UpperCAmelCase : str ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Dict = ['torch', 'transformers', 'onnx'] def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : Tuple ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : Dict ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : int , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[str] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : List[Any] = ['torch', 'transformers', 'onnx'] def __init__( self : str , *UpperCAmelCase : str , **UpperCAmelCase : List[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] )
329
1
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ): if isinstance(lowercase__ , lowercase__ ): __SCREAMING_SNAKE_CASE : Any = np.full((len(lowercase__ ), sequence_length, 2) , lowercase__ ) else: __SCREAMING_SNAKE_CASE : List[Any] = np.full((len(lowercase__ ), sequence_length) , lowercase__ ) for i, tensor in enumerate(lowercase__ ): if padding_side == "right": if isinstance(lowercase__ , lowercase__ ): __SCREAMING_SNAKE_CASE : Dict = tensor[:sequence_length] else: __SCREAMING_SNAKE_CASE : Optional[Any] = tensor[:sequence_length] else: if isinstance(lowercase__ , lowercase__ ): __SCREAMING_SNAKE_CASE : List[Any] = tensor[:sequence_length] else: __SCREAMING_SNAKE_CASE : Optional[int] = tensor[:sequence_length] return out_tensor.tolist() def _UpperCamelCase ( lowercase__ ): __SCREAMING_SNAKE_CASE : Dict = ord(lowercase__ ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True __SCREAMING_SNAKE_CASE : List[str] = unicodedata.category(lowercase__ ) if cat.startswith('''P''' ): return True return False @dataclass class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : PreTrainedTokenizerBase SCREAMING_SNAKE_CASE__ : Union[bool, str, PaddingStrategy] = True SCREAMING_SNAKE_CASE__ : Optional[int] = None SCREAMING_SNAKE_CASE__ : Optional[int] = None SCREAMING_SNAKE_CASE__ : int = -100 SCREAMING_SNAKE_CASE__ : str = "pt" def __magic_name__( self :Tuple , lowerCAmelCase__ :Any ) -> List[Any]: import torch __SCREAMING_SNAKE_CASE : int = '''label''' if '''label''' in features[0].keys() else '''labels''' __SCREAMING_SNAKE_CASE : List[str] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None __SCREAMING_SNAKE_CASE : List[str] = self.tokenizer.pad( lowerCAmelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , ) if labels is None: return batch __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(batch['''entity_ids'''] ).shape[1] __SCREAMING_SNAKE_CASE : List[str] = self.tokenizer.padding_side if padding_side == "right": __SCREAMING_SNAKE_CASE : Optional[int] = [ list(lowerCAmelCase__ ) + [self.label_pad_token_id] * (sequence_length - len(lowerCAmelCase__ )) for label in labels ] else: __SCREAMING_SNAKE_CASE : Optional[Any] = [ [self.label_pad_token_id] * (sequence_length - len(lowerCAmelCase__ )) + list(lowerCAmelCase__ ) for label in labels ] __SCREAMING_SNAKE_CASE : Optional[Any] = [feature['''ner_tags'''] for feature in features] __SCREAMING_SNAKE_CASE : Optional[int] = padding_tensor(lowerCAmelCase__ , -1 , lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = [feature['''original_entity_spans'''] for feature in features] __SCREAMING_SNAKE_CASE : int = padding_tensor(lowerCAmelCase__ , (-1, -1) , lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = {k: torch.tensor(lowerCAmelCase__ , dtype=torch.intaa ) for k, v in batch.items()} return batch
9
'''simple docstring''' import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow UpperCAmelCase_ = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""" ) @require_torch @require_tf @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Path , _UpperCAmelCase : Union[str, None] = None , _UpperCAmelCase : Union[List[str], None] = None , _UpperCAmelCase : Union[str, List[str], None] = None , _UpperCAmelCase : bool = True , ): """simple docstring""" UpperCAmelCase__ = [file for file in os.listdir(_UpperCAmelCase ) if os.path.isfile(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )] if identifier is not None: UpperCAmelCase__ = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): for n_ in n_identifier: UpperCAmelCase__ = [file for file in files if n_ not in file] else: UpperCAmelCase__ = [file for file in files if n_identifier not in file] UpperCAmelCase__ = ignore_files or [] ignore_files.append("""__init__.py""" ) UpperCAmelCase__ = [file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , _UpperCAmelCase ) if only_modules: UpperCAmelCase__ = file.split(""".""" )[0] try: UpperCAmelCase__ = getattr(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = doctest.DocTestSuite(_UpperCAmelCase ) UpperCAmelCase__ = unittest.TextTestRunner().run(_UpperCAmelCase ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f'''{module_identifier} is not a module.''' ) else: UpperCAmelCase__ = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """modeling""" UpperCAmelCase__ = [ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase , ignore_files=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """tokenization""" self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """configuration""" self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = ["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(_UpperCAmelCase , n_identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = Path("""docs/source""" ) UpperCAmelCase__ = ["""favicon.ico"""] self.analyze_directory(_UpperCAmelCase , ignore_files=_UpperCAmelCase , only_modules=_UpperCAmelCase )
346
0
'''simple docstring''' import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() __lowercase: List[Any] = logging.get_logger() @dataclass class UpperCAmelCase : _lowerCamelCase : nn.Module _lowerCamelCase : List[nn.Module] = field(default_factory=SCREAMING_SNAKE_CASE__) _lowerCamelCase : list = field(default_factory=SCREAMING_SNAKE_CASE__) def lowercase_ ( self : Union[str, Any], a_ : List[Any], a_ : Tensor, a_ : Tensor ): """simple docstring""" UpperCamelCase__ = len(list(m.modules() ) ) == 1 or isinstance(a_, nn.Convad ) or isinstance(a_, nn.BatchNormad ) if has_not_submodules: self.traced.append(a_ ) def __call__( self : Union[str, Any], a_ : Tensor ): """simple docstring""" for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(a_ ) [x.remove() for x in self.handles] return self @property def lowercase_ ( self : List[Any] ): """simple docstring""" return list(filter(lambda a_ : len(list(x.state_dict().keys() ) ) > 0, self.traced ) ) @dataclass class UpperCAmelCase : _lowerCamelCase : nn.Module _lowerCamelCase : nn.Module _lowerCamelCase : int = 0 _lowerCamelCase : List = field(default_factory=SCREAMING_SNAKE_CASE__) _lowerCamelCase : List = field(default_factory=SCREAMING_SNAKE_CASE__) def __call__( self : str, a_ : Tensor ): """simple docstring""" UpperCamelCase__ = Tracker(self.dest )(a_ ).parametrized UpperCamelCase__ = Tracker(self.src )(a_ ).parametrized UpperCamelCase__ = list(filter(lambda a_ : type(a_ ) not in self.src_skip, a_ ) ) UpperCamelCase__ = list(filter(lambda a_ : type(a_ ) not in self.dest_skip, a_ ) ) if len(a_ ) != len(a_ ): raise Exception( f'Numbers of operations are different. Source module has {len(a_ )} operations while' f' destination module has {len(a_ )}.' ) for dest_m, src_m in zip(a_, a_ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f'Transfered from={src_m} to={dest_m}' ) def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : ResNetConfig , _UpperCamelCase : Path , _UpperCamelCase : bool = True ) -> Dict: '''simple docstring''' print(F'Converting {name}...' ) with torch.no_grad(): UpperCamelCase__ = timm.create_model(_UpperCamelCase , pretrained=_UpperCamelCase ).eval() UpperCamelCase__ = ResNetForImageClassification(_UpperCamelCase ).eval() UpperCamelCase__ = ModuleTransfer(src=_UpperCamelCase , dest=_UpperCamelCase ) UpperCamelCase__ = torch.randn((1, 3, 2_24, 2_24) ) module_transfer(_UpperCamelCase ) assert torch.allclose(from_model(_UpperCamelCase ) , our_model(_UpperCamelCase ).logits ), "The model logits don't match the original one." UpperCamelCase__ = F'resnet{"-".join(name.split("resnet" ) )}' print(_UpperCamelCase ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=_UpperCamelCase , ) # we can use the convnext one UpperCamelCase__ = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=_UpperCamelCase , ) print(F'Pushed {checkpoint_name}' ) def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Path , _UpperCamelCase : str = None , _UpperCamelCase : bool = True ) -> int: '''simple docstring''' UpperCamelCase__ = "imagenet-1k-id2label.json" UpperCamelCase__ = 10_00 UpperCamelCase__ = (1, num_labels) UpperCamelCase__ = "huggingface/label-files" UpperCamelCase__ = num_labels UpperCamelCase__ = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="dataset" ) , "r" ) ) UpperCamelCase__ = {int(_UpperCamelCase ): v for k, v in idalabel.items()} UpperCamelCase__ = idalabel UpperCamelCase__ = {v: k for k, v in idalabel.items()} UpperCamelCase__ = partial(_UpperCamelCase , num_labels=_UpperCamelCase , idalabel=_UpperCamelCase , labelaid=_UpperCamelCase ) UpperCamelCase__ = { "resnet18": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="basic" ), "resnet26": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ), "resnet34": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="basic" ), "resnet50": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ), "resnet101": ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ), "resnet152": ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ), } if model_name: convert_weight_and_push(_UpperCamelCase , names_to_config[model_name] , _UpperCamelCase , _UpperCamelCase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) return config, expected_shape if __name__ == "__main__": __lowercase: List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default=None, type=str, help=( "The name of the model you wish to convert, it must be one of the supported resnet* architecture," " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=Path, required=True, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=True, type=bool, required=False, help="If True, push model and image processor to the hub.", ) __lowercase: Any = parser.parse_args() __lowercase: Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
354
'''simple docstring''' import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor __lowercase: str = random.Random() def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int]=1.0 , _UpperCamelCase : Dict=None , _UpperCamelCase : List[str]=None ) -> Union[str, Any]: '''simple docstring''' if rng is None: UpperCamelCase__ = global_rng UpperCamelCase__ = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class UpperCAmelCase ( unittest.TestCase): def __init__( self : List[Any], a_ : List[str], a_ : Any=7, a_ : Dict=400, a_ : str=2000, a_ : List[Any]=24, a_ : int=24, a_ : int=0.0, a_ : Union[str, Any]=1_6000, a_ : Union[str, Any]=True, a_ : Optional[Any]=True, ): """simple docstring""" UpperCamelCase__ = parent UpperCamelCase__ = batch_size UpperCamelCase__ = min_seq_length UpperCamelCase__ = max_seq_length UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCamelCase__ = feature_size UpperCamelCase__ = num_mel_bins UpperCamelCase__ = padding_value UpperCamelCase__ = sampling_rate UpperCamelCase__ = return_attention_mask UpperCamelCase__ = do_normalize def lowercase_ ( self : Tuple ): """simple docstring""" return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowercase_ ( self : Optional[Any], a_ : Union[str, Any]=False, a_ : Optional[int]=False ): """simple docstring""" def _flatten(a_ : Dict ): return list(itertools.chain(*a_ ) ) if equal_length: UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size UpperCamelCase__ = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: UpperCamelCase__ = [np.asarray(a_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase): _lowerCamelCase : Dict = SpeechaTextFeatureExtractor if is_speech_available() else None def lowercase_ ( self : Any ): """simple docstring""" UpperCamelCase__ = SpeechaTextFeatureExtractionTester(self ) def lowercase_ ( self : Optional[int], a_ : Tuple ): """simple docstring""" self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) ) def lowercase_ ( self : Any ): """simple docstring""" UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase__ = [np.asarray(a_ ) for speech_input in speech_inputs] # Test feature size UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) ) # Test batched UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(a_, a_ ): self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) ) # Test 2-D numpy arrays are batched. UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)] UpperCamelCase__ = np.asarray(a_ ) UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(a_, a_ ): self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) ) def lowercase_ ( self : List[str] ): """simple docstring""" UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase__ = ["longest", "max_length", "do_not_pad"] UpperCamelCase__ = [None, 16, None] for max_length, padding in zip(a_, a_ ): UpperCamelCase__ = feature_extractor( a_, padding=a_, max_length=a_, return_attention_mask=a_ ) UpperCamelCase__ = inputs.input_features UpperCamelCase__ = inputs.attention_mask UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowercase_ ( self : Any ): """simple docstring""" UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase__ = ["longest", "max_length", "do_not_pad"] UpperCamelCase__ = [None, 16, None] for max_length, padding in zip(a_, a_ ): UpperCamelCase__ = feature_extractor( a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ ) UpperCamelCase__ = inputs.input_features UpperCamelCase__ = inputs.attention_mask UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowercase_ ( self : str ): """simple docstring""" UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase__ = feature_extractor( a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, ) UpperCamelCase__ = inputs.input_features UpperCamelCase__ = inputs.attention_mask UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def lowercase_ ( self : Any ): """simple docstring""" UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase__ = feature_extractor( a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, ) UpperCamelCase__ = inputs.input_features UpperCamelCase__ = inputs.attention_mask UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape, (3, 4, 24) ) UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] UpperCamelCase__ = feature_extractor( a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, ) UpperCamelCase__ = inputs.input_features UpperCamelCase__ = inputs.attention_mask UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape, (3, 6, 24) ) def lowercase_ ( self : Optional[Any] ): """simple docstring""" import torch UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase__ = np.random.rand(100, 32 ).astype(np.floataa ) UpperCamelCase__ = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def lowercase_ ( self : List[str], a_ : int ): """simple docstring""" from datasets import load_dataset UpperCamelCase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" ) # automatic decoding with librispeech UpperCamelCase__ = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def lowercase_ ( self : int ): """simple docstring""" UpperCamelCase__ = np.array([ -1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241, -1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128, -1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625, ] ) # fmt: on UpperCamelCase__ = self._load_datasamples(1 ) UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase__ = feature_extractor(a_, return_tensors="pt" ).input_features self.assertEquals(input_features.shape, (1, 584, 24) ) self.assertTrue(np.allclose(input_features[0, 0, :30], a_, atol=1e-4 ) )
31
0
"""simple docstring""" from __future__ import annotations def A_ ( _lowercase ): '''simple docstring''' if not nums: raise ValueError("""List is empty""" ) return sum(__UpperCamelCase ) / len(__UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
66
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
249
0
'''simple docstring''' import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=64 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ): '''simple docstring''' __A : Tuple = parent __A : Optional[Any] = batch_size __A : List[Any] = seq_length __A : int = is_training __A : int = use_input_mask __A : Tuple = use_token_type_ids __A : Dict = use_labels __A : Optional[Any] = vocab_size __A : List[str] = hidden_size __A : Union[str, Any] = num_hidden_layers __A : Tuple = num_attention_heads __A : List[str] = intermediate_size __A : str = hidden_act __A : str = hidden_dropout_prob __A : List[Any] = attention_probs_dropout_prob __A : Dict = max_position_embeddings __A : Any = type_vocab_size __A : Union[str, Any] = type_sequence_label_size __A : Any = initializer_range __A : List[str] = num_labels __A : Optional[int] = num_choices __A : Tuple = scope __A : Union[str, Any] = vocab_size - 1 def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __A : str = None if self.use_input_mask: __A : int = random_attention_mask([self.batch_size, self.seq_length]) __A : Any = None if self.use_labels: __A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) __A : str = self.get_config() return config, input_ids, input_mask, token_labels def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' return GPTNeoXConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , ) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A ,__A ,__A : Dict = self.prepare_config_and_inputs() __A : List[Any] = True return config, input_ids, input_mask, token_labels def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : int = GPTNeoXModel(config=_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() __A : List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase) __A : List[str] = model(_UpperCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Any = True __A : Any = GPTNeoXModel(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() __A : List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Tuple = GPTNeoXForCausalLM(config=_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() __A : int = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Any = self.num_labels __A : List[str] = GPTNeoXForQuestionAnswering(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() __A : int = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Optional[Any] = self.num_labels __A : Tuple = GPTNeoXForSequenceClassification(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() __A : int = ids_tensor([self.batch_size] , self.type_sequence_label_size) __A : Optional[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Any = self.num_labels __A : Tuple = GPTNeoXForTokenClassification(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() __A : Any = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Optional[Any] = True __A : List[Any] = GPTNeoXForCausalLM(config=_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() # first forward pass __A : List[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase) __A : Any = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __A : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size) __A : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and __A : str = torch.cat([input_ids, next_tokens] , dim=-1) __A : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1) __A : List[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase) __A : Dict = output_from_no_past['hidden_states'][0] __A : List[str] = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , )['hidden_states'][0] # select random slice __A : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1]).item() __A : int = output_from_no_past[:, -3:, random_slice_idx].detach() __A : Tuple = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3)) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[str] = self.prepare_config_and_inputs() __A ,__A ,__A ,__A : Union[str, Any] = config_and_inputs __A : Dict = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE (a__ , a__ , a__ , unittest.TestCase ): lowerCAmelCase = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase = (GPTNeoXForCausalLM,) if is_torch_available() else () lowerCAmelCase = ( { '''feature-extraction''': GPTNeoXModel, '''question-answering''': GPTNeoXForQuestionAnswering, '''text-classification''': GPTNeoXForSequenceClassification, '''text-generation''': GPTNeoXForCausalLM, '''token-classification''': GPTNeoXForTokenClassification, '''zero-shot''': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Union[str, Any] = GPTNeoXModelTester(self) __A : Tuple = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=64 , num_attention_heads=8) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A ,__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A ,__A ,__A : Any = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A ,__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() __A : List[Any] = None self.model_tester.create_and_check_model_as_decoder(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A ,__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase) @unittest.skip(reason='Feed forward chunking is not implemented') def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' pass @parameterized.expand([('linear',), ('dynamic',)]) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase): '''simple docstring''' __A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() __A : Tuple = ids_tensor([1, 10] , config.vocab_size) __A : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights __A : List[Any] = GPTNeoXModel(_UpperCAmelCase) original_model.to(_UpperCAmelCase) original_model.eval() __A : str = original_model(_UpperCAmelCase).last_hidden_state __A : Dict = original_model(_UpperCAmelCase).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights __A : Union[str, Any] = {'type': scaling_type, 'factor': 10.0} __A : Dict = GPTNeoXModel(_UpperCAmelCase) scaled_model.to(_UpperCAmelCase) scaled_model.eval() __A : Dict = scaled_model(_UpperCAmelCase).last_hidden_state __A : Dict = scaled_model(_UpperCAmelCase).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-5)) else: self.assertFalse(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-5)) @require_torch class SCREAMING_SNAKE_CASE (unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : str = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped') for checkpointing in [True, False]: __A : List[Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped') if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(_UpperCAmelCase) __A : Any = tokenizer('My favorite food is' , return_tensors='pt').to(_UpperCAmelCase) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 __A : int = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure' __A : str = model.generate(**_UpperCAmelCase , do_sample=_UpperCAmelCase , max_new_tokens=20) __A : int = tokenizer.batch_decode(_UpperCAmelCase)[0] self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
190
'''simple docstring''' import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel lowercase__ : Optional[int] = HfApi() lowercase__ : Dict = {} # fmt: off lowercase__ : List[str] = torch.tensor([ -0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467, 1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189, -1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839, 0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557 ]) lowercase__ : Tuple = torch.tensor([ -2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436, 1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208, -2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948, 2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365 ]) lowercase__ : Optional[Any] = torch.tensor([ -0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869, -0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304, -0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925, 0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943 ]) lowercase__ : List[Any] = torch.tensor([ 0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172, -0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309, 0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805, -0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505 ]) lowercase__ : Dict = torch.tensor([ 0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133, -0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395, 0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559, -0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386 ]) lowercase__ : Optional[int] = torch.tensor([ 0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078, -0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330, 0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683, -0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431 ]) lowercase__ : List[Any] = torch.tensor([ 0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042, -0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398, 0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574, -0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390 ]) lowercase__ : List[str] = torch.tensor([ 0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042, -0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290, 0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746, -0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473 ]) lowercase__ : Dict = torch.tensor([ -1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330, 1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243, -2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810, 1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251]) lowercase__ : Optional[int] = torch.tensor([ -1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324, 0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181, -2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259, 1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266 ]) lowercase__ : List[str] = torch.tensor([ -1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212, 0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027, -2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131, 1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355 ]) lowercase__ : Optional[int] = torch.tensor([ -2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959, 1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351, -3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341, 3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066 ]) lowercase__ : int = torch.tensor([ -2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740, 1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398, -2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395, 2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243 ]) lowercase__ : int = torch.tensor([ -2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336, 1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908, -3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560, 3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343 ]) lowercase__ : List[Any] = torch.tensor([ -1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344, 1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391, -2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439, 1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219 ]) # fmt: on lowercase__ : str = api.list_models(filter='''diffusers''') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": lowercase__ : int = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1] print(f"""Started running {mod.modelId}!!!""") if mod.modelId.startswith('''CompVis'''): lowercase__ : Optional[Any] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''') else: lowercase__ : Tuple = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) lowercase__ : List[str] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) lowercase__ : int = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): lowercase__ : Tuple = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1e-3 ) print(f"""{mod.modelId} has passed successfully!!!""")
190
1
"""simple docstring""" import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class lowerCAmelCase__ ( UpperCamelCase__ ): def lowercase ( self : Dict , _lowerCamelCase : str ): with open(UpperCamelCase_ , encoding='''utf-8''' ) as input_file: _snake_case = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' ) _snake_case = input_file.read() _snake_case = regexp.search(UpperCamelCase_ ) return match def lowercase ( self : List[str] , _lowerCamelCase : str ): with open(UpperCamelCase_ , encoding='''utf-8''' ) as input_file: _snake_case = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL ) _snake_case = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` _snake_case = regexp.finditer(UpperCamelCase_ ) _snake_case = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def lowercase ( self : List[Any] ): _snake_case = Path('''./datasets''' ) _snake_case = list(dataset_paths.absolute().glob('''**/*.py''' ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(UpperCamelCase_ ) ): raise AssertionError(f'''open(...) must use utf-8 encoding in {dataset}''' ) def lowercase ( self : Any ): _snake_case = Path('''./datasets''' ) _snake_case = list(dataset_paths.absolute().glob('''**/*.py''' ) ) for dataset in dataset_files: if self._no_print_statements(str(UpperCamelCase_ ) ): raise AssertionError(f'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
288
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable lowerCAmelCase = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = ['DPTFeatureExtractor'] lowerCAmelCase = ['DPTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ 'DPT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DPTForDepthEstimation', 'DPTForSemanticSegmentation', 'DPTModel', 'DPTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
110
0
"""simple docstring""" from __future__ import annotations import time import numpy as np A : List[str] = [8, 5, 9, 7] A : Tuple = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] A : Union[str, Any] = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class _UpperCamelCase : '''simple docstring''' def __init__( self , __a , __a , __a , ): __lowerCAmelCase = claim_vector __lowerCAmelCase = allocated_resources_table __lowerCAmelCase = maximum_claim_table def snake_case ( self ): return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def snake_case ( self ): return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def snake_case ( self ): return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__a ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def snake_case ( self ): return {self.__need().index(__a ): i for i in self.__need()} def snake_case ( self , **__a ): __lowerCAmelCase = self.__need() __lowerCAmelCase = self.__allocated_resources_table __lowerCAmelCase = self.__available_resources() __lowerCAmelCase = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("_" * 50 + "\n" ) while need_list: __lowerCAmelCase = False for each_need in need_list: __lowerCAmelCase = True for index, need in enumerate(__a ): if need > available_resources[index]: __lowerCAmelCase = False break if execution: __lowerCAmelCase = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: __lowerCAmelCase = original_need_index print(f"Process {process_number + 1} is executing." ) # remove the process run from stack need_list.remove(__a ) # update available/freed resources stack __lowerCAmelCase = np.array(__a ) + np.array( alloc_resources_table[process_number] ) print( "Updated available resource stack for processes: " + " ".join([str(__a ) for x in available_resources] ) ) break if safe: print("The process is in a safe state.\n" ) else: print("System in unsafe state. Aborting...\n" ) break def snake_case ( self ): print(" " * 9 + "Allocated Resource Table" ) for item in self.__allocated_resources_table: print( f"P{self.__allocated_resources_table.index(__a ) + 1}" + " ".join(f"{it:>8}" for it in item ) + "\n" ) print(" " * 9 + "System Resource Table" ) for item in self.__maximum_claim_table: print( f"P{self.__maximum_claim_table.index(__a ) + 1}" + " ".join(f"{it:>8}" for it in item ) + "\n" ) print( "Current Usage by Active Processes: " + " ".join(str(__a ) for x in self.__claim_vector ) ) print( "Initial Available Resources: " + " ".join(str(__a ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
259
"""simple docstring""" import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer A : Any = logging.get_logger(__name__) class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' __UpperCAmelCase : int ="""AutoTokenizer""" __UpperCAmelCase : Union[str, Any] =["""tokenizer"""] __UpperCAmelCase : Tuple ={ """semantic_prompt""": 1, """coarse_prompt""": 2, """fine_prompt""": 2, } def __init__( self , __a , __a=None ): super().__init__(__a ) __lowerCAmelCase = speaker_embeddings @classmethod def snake_case ( cls , __a , __a="speaker_embeddings_path.json" , **__a ): if speaker_embeddings_dict_path is not None: __lowerCAmelCase = get_file_from_repo( __a , __a , subfolder=kwargs.pop("subfolder" , __a ) , cache_dir=kwargs.pop("cache_dir" , __a ) , force_download=kwargs.pop("force_download" , __a ) , proxies=kwargs.pop("proxies" , __a ) , resume_download=kwargs.pop("resume_download" , __a ) , local_files_only=kwargs.pop("local_files_only" , __a ) , use_auth_token=kwargs.pop("use_auth_token" , __a ) , revision=kwargs.pop("revision" , __a ) , ) if speaker_embeddings_path is None: logger.warning( f"`{os.path.join(__a , __a )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." ) __lowerCAmelCase = None else: with open(__a ) as speaker_embeddings_json: __lowerCAmelCase = json.load(__a ) else: __lowerCAmelCase = None __lowerCAmelCase = AutoTokenizer.from_pretrained(__a , **__a ) return cls(tokenizer=__a , speaker_embeddings=__a ) def snake_case ( self , __a , __a="speaker_embeddings_path.json" , __a="speaker_embeddings" , __a = False , **__a , ): if self.speaker_embeddings is not None: os.makedirs(os.path.join(__a , __a , "v2" ) , exist_ok=__a ) __lowerCAmelCase = {} __lowerCAmelCase = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": __lowerCAmelCase = self._load_voice_preset(__a ) __lowerCAmelCase = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict["repo_or_path"] , __a , f"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=__a , ) __lowerCAmelCase = os.path.join(__a , f"{prompt_key}_{key}.npy" ) __lowerCAmelCase = tmp_dict with open(os.path.join(__a , __a ) , "w" ) as fp: json.dump(__a , __a ) super().save_pretrained(__a , __a , **__a ) def snake_case ( self , __a = None , **__a ): __lowerCAmelCase = self.speaker_embeddings[voice_preset] __lowerCAmelCase = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." ) __lowerCAmelCase = get_file_from_repo( self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , __a ) , cache_dir=kwargs.pop("cache_dir" , __a ) , force_download=kwargs.pop("force_download" , __a ) , proxies=kwargs.pop("proxies" , __a ) , resume_download=kwargs.pop("resume_download" , __a ) , local_files_only=kwargs.pop("local_files_only" , __a ) , use_auth_token=kwargs.pop("use_auth_token" , __a ) , revision=kwargs.pop("revision" , __a ) , ) if path is None: raise ValueError( f"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." ) __lowerCAmelCase = np.load(__a ) return voice_preset_dict def snake_case ( self , __a = None ): for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(f"Voice preset unrecognized, missing {key} as a key." ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." ) def __call__( self , __a=None , __a=None , __a="pt" , __a=2_56 , __a=False , __a=True , __a=False , **__a , ): if voice_preset is not None and not isinstance(__a , __a ): if ( isinstance(__a , __a ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): __lowerCAmelCase = self._load_voice_preset(__a ) else: if isinstance(__a , __a ) and not voice_preset.endswith(".npz" ): __lowerCAmelCase = voice_preset + ".npz" __lowerCAmelCase = np.load(__a ) if voice_preset is not None: self._validate_voice_preset_dict(__a , **__a ) __lowerCAmelCase = BatchFeature(data=__a , tensor_type=__a ) __lowerCAmelCase = self.tokenizer( __a , return_tensors=__a , padding="max_length" , max_length=__a , return_attention_mask=__a , return_token_type_ids=__a , add_special_tokens=__a , **__a , ) if voice_preset is not None: __lowerCAmelCase = voice_preset return encoded_text
259
1
from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar lowerCAmelCase__ :Union[str, Any] = TypeVar('''T''') class __a ( Generic[T] ): def __init__( self , _SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" _UpperCAmelCase = data _UpperCAmelCase = None def __str__( self ) -> str: """simple docstring""" return f'''{self.data}''' class __a ( Generic[T] ): def __init__( self ) -> None: """simple docstring""" _UpperCAmelCase = None def __iter__( self ) -> Iterator[T]: """simple docstring""" _UpperCAmelCase = self.top while node: yield node.data _UpperCAmelCase = node.next def __str__( self ) -> str: """simple docstring""" return "->".join([str(_SCREAMING_SNAKE_CASE ) for item in self] ) def __len__( self ) -> int: """simple docstring""" return len(tuple(iter(self ) ) ) def UpperCAmelCase__ ( self ) -> bool: """simple docstring""" return self.top is None def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" _UpperCAmelCase = Node(_SCREAMING_SNAKE_CASE ) if not self.is_empty(): _UpperCAmelCase = self.top _UpperCAmelCase = node def UpperCAmelCase__ ( self ) -> T: """simple docstring""" if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.top _UpperCAmelCase = self.top.next return pop_node.data def UpperCAmelCase__ ( self ) -> T: """simple docstring""" if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def UpperCAmelCase__ ( self ) -> None: """simple docstring""" _UpperCAmelCase = None if __name__ == "__main__": from doctest import testmod testmod()
329
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ :Any = logging.get_logger(__name__) def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any] , a__: Dict , a__: Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase = original_name.split('.' )[0] _UpperCAmelCase = key.split('.' ) _UpperCAmelCase = int(key_list[key_list.index(a__ ) - 2] ) _UpperCAmelCase = int(key_list[key_list.index(a__ ) - 1] ) _UpperCAmelCase = orig_block_num - offset _UpperCAmelCase = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' ) return key def lowerCAmelCase__ ( a__: Tuple ) -> int: '''simple docstring''' _UpperCAmelCase = OrderedDict() _UpperCAmelCase , _UpperCAmelCase = 0, 0 for key, value in state_dict.items(): if key.startswith('network' ): _UpperCAmelCase = key.replace('network' , 'poolformer.encoder' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('bias' ) and "patch_embed" not in key: patch_emb_offset += 1 _UpperCAmelCase = key[: key.find('proj' )] _UpperCAmelCase = key.replace(a__ , F'''patch_embeddings.{total_embed_found}.''' ) _UpperCAmelCase = key.replace('proj' , 'projection' ) if key.endswith('bias' ): total_embed_found += 1 if "patch_embeddings" in key: _UpperCAmelCase = 'poolformer.encoder.' + key if "mlp.fc1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc1' , 'output.conv1' ) if "mlp.fc2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc2' , 'output.conv2' ) if "norm1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm1' , 'before_norm' ) if "norm2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm2' , 'after_norm' ) if "layer_scale_1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_1' , 'layer_scale_1' ) if "layer_scale_2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_2' , 'layer_scale_2' ) if "head" in key: _UpperCAmelCase = key.replace('head' , 'classifier' ) _UpperCAmelCase = value return new_state_dict def lowerCAmelCase__ ( ) -> Tuple: '''simple docstring''' _UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' _UpperCAmelCase = Image.open(requests.get(a__ , stream=a__ ).raw ) return image @torch.no_grad() def lowerCAmelCase__ ( a__: Optional[int] , a__: Dict , a__: Any ) -> Dict: '''simple docstring''' _UpperCAmelCase = PoolFormerConfig() # set attributes based on model_name _UpperCAmelCase = 'huggingface/label-files' _UpperCAmelCase = model_name[-3:] _UpperCAmelCase = 1_0_0_0 _UpperCAmelCase = 'imagenet-1k-id2label.json' _UpperCAmelCase = (1, 1_0_0_0) # set config attributes _UpperCAmelCase = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) ) _UpperCAmelCase = {int(a__ ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} if size == "s12": _UpperCAmelCase = [2, 2, 6, 2] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 _UpperCAmelCase = 0.9 elif size == "s24": _UpperCAmelCase = [4, 4, 1_2, 4] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 _UpperCAmelCase = 0.9 elif size == "s36": _UpperCAmelCase = [6, 6, 1_8, 6] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.9 elif size == "m36": _UpperCAmelCase = [6, 6, 1_8, 6] _UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.95 elif size == "m48": _UpperCAmelCase = [8, 8, 2_4, 8] _UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.95 else: raise ValueError(F'''Size {size} not supported''' ) # load image processor _UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ ) # Prepare image _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=a__ , return_tensors='pt' ).pixel_values logger.info(F'''Converting model {model_name}...''' ) # load original state dict _UpperCAmelCase = torch.load(a__ , map_location=torch.device('cpu' ) ) # rename keys _UpperCAmelCase = rename_keys(a__ ) # create HuggingFace model and load state dict _UpperCAmelCase = PoolFormerForImageClassification(a__ ) model.load_state_dict(a__ ) model.eval() # Define image processor _UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ ) _UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values # forward pass _UpperCAmelCase = model(a__ ) _UpperCAmelCase = outputs.logits # define expected logit slices for different models if size == "s12": _UpperCAmelCase = torch.tensor([-0.3_045, -0.6_758, -0.4_869] ) elif size == "s24": _UpperCAmelCase = torch.tensor([0.4_402, -0.1_374, -0.8_045] ) elif size == "s36": _UpperCAmelCase = torch.tensor([-0.6_080, -0.5_133, -0.5_898] ) elif size == "m36": _UpperCAmelCase = torch.tensor([0.3_952, 0.2_263, -1.2_668] ) elif size == "m48": _UpperCAmelCase = torch.tensor([0.1_167, -0.0_656, -0.3_423] ) else: raise ValueError(F'''Size {size} not supported''' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , a__ , atol=1e-2 ) # finally, save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(a__ ).mkdir(exist_ok=a__ ) model.save_pretrained(a__ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(a__ ) if __name__ == "__main__": lowerCAmelCase__ :str = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''poolformer_s12''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCAmelCase__ :Dict = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
329
1
"""simple docstring""" import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () UpperCAmelCase__ = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). UpperCAmelCase__ = [0, 2_5, 5_0] UpperCAmelCase__ = [2_5, 5_0, 7_5] UpperCAmelCase__ = fuzz.membership.trimf(X, abca) UpperCAmelCase__ = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. UpperCAmelCase__ = np.ones(7_5) UpperCAmelCase__ = np.zeros((7_5,)) # 1. Union = max(µA(x), µB(x)) UpperCAmelCase__ = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) UpperCAmelCase__ = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) UpperCAmelCase__ = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) UpperCAmelCase__ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] UpperCAmelCase__ = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) UpperCAmelCase__ = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] UpperCAmelCase__ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] UpperCAmelCase__ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title("""Young""") plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title("""Middle aged""") plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title("""union""") plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title("""intersection""") plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title("""complement_a""") plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title("""difference a/b""") plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title("""alg_sum""") plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title("""alg_product""") plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title("""bdd_sum""") plt.grid(True) plt.subplot(4, 3, 1_0) plt.plot(X, bdd_difference) plt.title("""bdd_difference""") plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
30
"""simple docstring""" import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType UpperCAmelCase__ = logging.get_logger(__name__) class a ( lowerCAmelCase_ ): _snake_case : List[Any] = 'vision-encoder-decoder' _snake_case : Optional[int] = True def __init__( self : int , **__lowerCAmelCase : Any ): super().__init__(**__lowerCAmelCase ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( f'''A configuraton of type {self.model_type} cannot be instantiated because ''' f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' ) _UpperCAmelCase = kwargs.pop("""encoder""" ) _UpperCAmelCase = encoder_config.pop("""model_type""" ) _UpperCAmelCase = kwargs.pop("""decoder""" ) _UpperCAmelCase = decoder_config.pop("""model_type""" ) _UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase ) _UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase ) _UpperCAmelCase = True @classmethod def lowerCAmelCase_ ( cls : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , **__lowerCAmelCase : str ): logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" ) _UpperCAmelCase = True _UpperCAmelCase = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = copy.deepcopy(self.__dict__ ) _UpperCAmelCase = self.encoder.to_dict() _UpperCAmelCase = self.decoder.to_dict() _UpperCAmelCase = self.__class__.model_type return output class a ( lowerCAmelCase_ ): _snake_case : Union[str, Any] = version.parse('1.11' ) @property def lowerCAmelCase_ ( self : int ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase_ ( self : Tuple ): return 1e-4 @property def lowerCAmelCase_ ( self : Dict ): return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} ) class a ( lowerCAmelCase_ ): @property def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = OrderedDict() _UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""} _UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""} _UpperCAmelCase = {0: """batch""", 1: """encoder_sequence"""} return common_inputs def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : "PreTrainedTokenizerBase" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , ): import torch _UpperCAmelCase = OrderedDict() _UpperCAmelCase = super().generate_dummy_inputs( __lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = dummy_input["""input_ids"""].shape _UpperCAmelCase = (batch, encoder_sequence, self._config.encoder_hidden_size) _UpperCAmelCase = dummy_input.pop("""input_ids""" ) _UpperCAmelCase = dummy_input.pop("""attention_mask""" ) _UpperCAmelCase = torch.zeros(__lowerCAmelCase ) return common_inputs class a ( lowerCAmelCase_ ): @property def lowerCAmelCase_ ( self : Tuple ): pass def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : PretrainedConfig ): return VisionEncoderDecoderEncoderOnnxConfig(__lowerCAmelCase ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : str = "default" ): _UpperCAmelCase = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(__lowerCAmelCase , __lowerCAmelCase )
30
1
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { """openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""", } # fmt: off __UpperCamelCase = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 357, 366, 438, 532, 685, 705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377, 1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211, 4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786, 1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791, 1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409, 3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361 ] __UpperCamelCase = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627, 3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647, 7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793, 1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675, 2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865, 4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362 ] class UpperCamelCase ( snake_case__ ): SCREAMING_SNAKE_CASE_ = "whisper" SCREAMING_SNAKE_CASE_ = ["past_key_values"] SCREAMING_SNAKE_CASE_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self, lowerCAmelCase__=5_1865, lowerCAmelCase__=80, lowerCAmelCase__=6, lowerCAmelCase__=4, lowerCAmelCase__=6, lowerCAmelCase__=4, lowerCAmelCase__=1536, lowerCAmelCase__=1536, lowerCAmelCase__=0.0, lowerCAmelCase__=0.0, lowerCAmelCase__=5_0257, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__="gelu", lowerCAmelCase__=256, lowerCAmelCase__=0.0, lowerCAmelCase__=0.0, lowerCAmelCase__=0.0, lowerCAmelCase__=0.02, lowerCAmelCase__=False, lowerCAmelCase__=1500, lowerCAmelCase__=448, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, lowerCAmelCase__=None, lowerCAmelCase__=[220, 5_0256], lowerCAmelCase__=False, lowerCAmelCase__=256, lowerCAmelCase__=False, lowerCAmelCase__=0.05, lowerCAmelCase__=10, lowerCAmelCase__=2, lowerCAmelCase__=0.0, lowerCAmelCase__=10, lowerCAmelCase__=0, lowerCAmelCase__=7, **lowerCAmelCase__, ) -> Union[str, Any]: snake_case_ = vocab_size snake_case_ = num_mel_bins snake_case_ = d_model snake_case_ = encoder_layers snake_case_ = encoder_attention_heads snake_case_ = decoder_layers snake_case_ = decoder_attention_heads snake_case_ = decoder_ffn_dim snake_case_ = encoder_ffn_dim snake_case_ = dropout snake_case_ = attention_dropout snake_case_ = activation_dropout snake_case_ = activation_function snake_case_ = init_std snake_case_ = encoder_layerdrop snake_case_ = decoder_layerdrop snake_case_ = use_cache snake_case_ = encoder_layers snake_case_ = scale_embedding # scale factor will be sqrt(d_model) if True snake_case_ = max_source_positions snake_case_ = max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. snake_case_ = classifier_proj_size snake_case_ = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 snake_case_ = apply_spec_augment snake_case_ = mask_time_prob snake_case_ = mask_time_length snake_case_ = mask_time_min_masks snake_case_ = mask_feature_prob snake_case_ = mask_feature_length snake_case_ = mask_feature_min_masks snake_case_ = median_filter_width super().__init__( pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, is_encoder_decoder=lowerCAmelCase__, decoder_start_token_id=lowerCAmelCase__, suppress_tokens=lowerCAmelCase__, begin_suppress_tokens=lowerCAmelCase__, **lowerCAmelCase__, ) class UpperCamelCase ( snake_case__ ): @property def a_ ( self) -> str: snake_case_ = OrderedDict( [ ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}), ]) if self.use_past: snake_case_ = {0: "batch"} else: snake_case_ = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase__, direction='inputs') return common_inputs def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = -1, lowerCAmelCase__ = -1, lowerCAmelCase__ = False, lowerCAmelCase__ = None, lowerCAmelCase__ = 2_2050, lowerCAmelCase__ = 5.0, lowerCAmelCase__ = 220, ) -> Optional[int]: snake_case_ = OrderedDict() snake_case_ = OnnxConfig.generate_dummy_inputs( self, preprocessor=preprocessor.feature_extractor, batch_size=lowerCAmelCase__, framework=lowerCAmelCase__, sampling_rate=lowerCAmelCase__, time_duration=lowerCAmelCase__, frequency=lowerCAmelCase__, ) snake_case_ = encoder_inputs["input_features"].shape[2] snake_case_ = encoder_sequence_length // 2 if self.use_past else seq_length snake_case_ = super().generate_dummy_inputs( preprocessor.tokenizer, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) snake_case_ = encoder_inputs.pop('input_features') snake_case_ = decoder_inputs.pop('decoder_input_ids') if "past_key_values" in decoder_inputs: snake_case_ = decoder_inputs.pop('past_key_values') return dummy_inputs @property def a_ ( self) -> Tuple: return 1e-3
69
'''simple docstring''' __SCREAMING_SNAKE_CASE : Dict = 8.3_1_4_4_6_2 # Unit - J mol-1 K-1 def UpperCamelCase_ ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float: """simple docstring""" if moles < 0 or kelvin < 0 or volume < 0: raise ValueError("Invalid inputs. Enter positive value." ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def UpperCamelCase_ ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float: """simple docstring""" if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError("Invalid inputs. Enter positive value." ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
31
0
'''simple docstring''' from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class lowerCAmelCase__ ( lowerCamelCase_ ): @slow @require_torch def _snake_case ( self ): """simple docstring""" lowercase_ : Optional[int] = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' ) lowercase_ : Union[str, Any] = BertTokenizer.from_pretrained('''bert-base-uncased''' ) lowercase_ : Tuple = bertabert.config.encoder.vocab_size lowercase_ : Any = tokenizer.sep_token_id lowercase_ : Optional[Any] = tokenizer.cls_token_id lowercase_ : str = 1_28 lowercase_ : str = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' ) lowercase_ : Optional[Any] = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' ) lowercase_ : str = train_dataset.select(range(32 ) ) lowercase_ : Any = val_dataset.select(range(16 ) ) lowercase_ : int = 4 def _map_to_encoder_decoder_inputs(__SCREAMING_SNAKE_CASE ): # Tokenizer will automatically set [BOS] <text> [EOS] lowercase_ : List[Any] = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=__SCREAMING_SNAKE_CASE , max_length=5_12 ) lowercase_ : Optional[Any] = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=__SCREAMING_SNAKE_CASE , max_length=1_28 ) lowercase_ : Tuple = inputs.input_ids lowercase_ : str = inputs.attention_mask lowercase_ : str = outputs.input_ids lowercase_ : Dict = outputs.input_ids.copy() lowercase_ : List[str] = [ [-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels'''] ] lowercase_ : List[Any] = outputs.attention_mask assert all(len(__SCREAMING_SNAKE_CASE ) == 5_12 for x in inputs.input_ids ) assert all(len(__SCREAMING_SNAKE_CASE ) == 1_28 for x in outputs.input_ids ) return batch def _compute_metrics(__SCREAMING_SNAKE_CASE ): lowercase_ : Any = pred.label_ids lowercase_ : Any = pred.predictions # all unnecessary tokens are removed lowercase_ : List[str] = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__SCREAMING_SNAKE_CASE ) )] ) / len(__SCREAMING_SNAKE_CASE ) return {"accuracy": accuracy} # map train dataset lowercase_ : List[Any] = train_dataset.map( _map_to_encoder_decoder_inputs , batched=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , remove_columns=['''article''', '''highlights'''] , ) train_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) # same for validation dataset lowercase_ : Dict = val_dataset.map( _map_to_encoder_decoder_inputs , batched=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , remove_columns=['''article''', '''highlights'''] , ) val_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) lowercase_ : Tuple = self.get_auto_remove_tmp_dir() lowercase_ : int = SeqaSeqTrainingArguments( output_dir=__SCREAMING_SNAKE_CASE , per_device_train_batch_size=__SCREAMING_SNAKE_CASE , per_device_eval_batch_size=__SCREAMING_SNAKE_CASE , predict_with_generate=__SCREAMING_SNAKE_CASE , evaluation_strategy='''steps''' , do_train=__SCREAMING_SNAKE_CASE , do_eval=__SCREAMING_SNAKE_CASE , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer lowercase_ : Union[str, Any] = SeqaSeqTrainer( model=__SCREAMING_SNAKE_CASE , args=__SCREAMING_SNAKE_CASE , compute_metrics=_compute_metrics , train_dataset=__SCREAMING_SNAKE_CASE , eval_dataset=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , ) # start training trainer.train()
264
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowercase : Optional[Any] = logging.get_logger(__name__) _lowercase : str = "▁" _lowercase : Optional[int] = {"vocab_file": "sentencepiece.bpe.model"} _lowercase : Dict = { "vocab_file": { "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model", } } _lowercase : Optional[Any] = { "facebook/xglm-564M": 2_0_4_8, } class lowerCAmelCase__ ( lowerCamelCase_ ): lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = ['''input_ids''', '''attention_mask'''] def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" lowercase_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer lowercase_ : Optional[Any] = 7 lowercase_ : List[Any] = [F'''<madeupword{i}>''' for i in range(self.num_madeup_words )] lowercase_ : Tuple = kwargs.get('''additional_special_tokens''' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) lowercase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) ) lowercase_ : Dict = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowercase_ : List[Any] = 1 # Mimic fairseq token-to-id alignment for the first 4 token lowercase_ : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} lowercase_ : Dict = len(self.sp_model ) lowercase_ : int = {F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(__SCREAMING_SNAKE_CASE ) lowercase_ : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ): """simple docstring""" lowercase_ : List[Any] = self.__dict__.copy() lowercase_ : Optional[Any] = None lowercase_ : List[Any] = self.sp_model.serialized_model_proto() return state def __setstate__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase_ : List[Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase_ : Optional[Any] = {} lowercase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" if token_ids_a is None: return [self.sep_token_id] + token_ids_a lowercase_ : Optional[Any] = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE ) if token_ids_a is None: return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" lowercase_ : List[Any] = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def _snake_case ( self ): """simple docstring""" return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def _snake_case ( self ): """simple docstring""" lowercase_ : Any = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _snake_case ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) def _snake_case ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase_ : str = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _snake_case ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _snake_case ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase_ : Dict = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip() return out_string def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase_ : str = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi: lowercase_ : Optional[int] = self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
264
1
'''simple docstring''' import math import tensorflow as tf from packaging import version def _lowerCAmelCase ( __snake_case : int ) -> int: __A : Any = tf.convert_to_tensor(__snake_case ) __A : int = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) )) return x * cdf def _lowerCAmelCase ( __snake_case : Optional[int] ) -> Any: __A : Union[str, Any] = tf.convert_to_tensor(__snake_case ) __A : Tuple = tf.cast(math.pi , x.dtype ) __A : Tuple = tf.cast(0.044_715 , x.dtype ) __A : int = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__snake_case , 3 )) )) return x * cdf def _lowerCAmelCase ( __snake_case : List[str] ) -> Tuple: __A : str = tf.convert_to_tensor(__snake_case ) return x * tf.tanh(tf.math.softplus(__snake_case ) ) def _lowerCAmelCase ( __snake_case : Any ) -> List[str]: __A : Any = tf.convert_to_tensor(__snake_case ) __A : Optional[Any] = tf.cast(0.044_715 , x.dtype ) __A : int = tf.cast(0.7_978_845_608 , x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def _lowerCAmelCase ( __snake_case : Any ) -> Tuple: __A : List[str] = tf.convert_to_tensor(__snake_case ) __A : Optional[int] = tf.cast(1.702 , x.dtype ) return x * tf.math.sigmoid(coeff * x ) def _lowerCAmelCase ( __snake_case : List[str] ) -> Tuple: return tf.clip_by_value(_gelu(__snake_case ) , -10 , 10 ) def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Union[str, Any]=-1 ) -> List[str]: __A ,__A : Dict = tf.split(__snake_case , 2 , axis=__snake_case ) return a * tf.math.sigmoid(__snake_case ) if version.parse(tf.version.VERSION) >= version.parse('''2.4'''): def _lowerCAmelCase ( __snake_case : List[str] ) -> List[Any]: return tf.keras.activations.gelu(__snake_case , approximate=__snake_case ) lowercase__ : List[Any] = tf.keras.activations.gelu lowercase__ : Any = approximate_gelu_wrap else: lowercase__ : Dict = _gelu lowercase__ : List[Any] = _gelu_new lowercase__ : Union[str, Any] = { '''gelu''': gelu, '''gelu_10''': gelu_aa, '''gelu_fast''': gelu_fast, '''gelu_new''': gelu_new, '''glu''': glu, '''mish''': mish, '''quick_gelu''': quick_gelu, '''relu''': tf.keras.activations.relu, '''sigmoid''': tf.keras.activations.sigmoid, '''silu''': tf.keras.activations.swish, '''swish''': tf.keras.activations.swish, '''tanh''': tf.keras.activations.tanh, } def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> Optional[int]: if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(f'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
190
'''simple docstring''' from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = ['''image_processor''', '''tokenizer'''] lowerCAmelCase = '''BlipImageProcessor''' lowerCAmelCase = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : int = False super().__init__(_UpperCAmelCase , _UpperCAmelCase) __A : Optional[int] = self.image_processor def __call__( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = 0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = True , _UpperCAmelCase = None , **_UpperCAmelCase , ): '''simple docstring''' if images is None and text is None: raise ValueError('You have to specify either images or text.') # Get only text if images is None: __A : int = self.tokenizer __A : Optional[Any] = self.tokenizer( text=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , ) return text_encoding # add pixel_values __A : List[Any] = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase) if text is not None: __A : Optional[Any] = self.tokenizer( text=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , ) else: __A : int = None if text_encoding is not None: encoding_image_processor.update(_UpperCAmelCase) return encoding_image_processor def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase , **_UpperCAmelCase): '''simple docstring''' return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase , **_UpperCAmelCase): '''simple docstring''' return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase) @property def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[str] = self.tokenizer.model_input_names __A : Tuple = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
190
1
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" if any(not isinstance(UpperCamelCase , UpperCamelCase ) or x < 0 for x in sequence ): raise TypeError("""Sequence must be list of non-negative integers""" ) for _ in range(len(UpperCamelCase ) ): for i, (rod_upper, rod_lower) in enumerate(zip(UpperCamelCase , sequence[1:] ) ): if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower return sequence if __name__ == "__main__": assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
184
'''simple docstring''' import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class lowerCAmelCase_: '''simple docstring''' __lowercase : Optional[Union[str, Path]] = None __lowercase : bool = False __lowercase : bool = False __lowercase : bool = False __lowercase : Optional[Dict] = None __lowercase : Optional[str] = None __lowercase : bool = False __lowercase : bool = False __lowercase : bool = False __lowercase : bool = True __lowercase : Optional[int] = None __lowercase : int = 1 __lowercase : Optional[Union[str, bool]] = None __lowercase : bool = False __lowercase : Optional[Dict] = None __lowercase : Optional[str] = None def UpperCAmelCase_ ( self ) -> "DownloadConfig": return self.__class__(**{k: copy.deepcopy(__UpperCAmelCase ) for k, v in self.__dict__.items()} )
184
1
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> Any: for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): UpperCamelCase :Tuple = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :List[Any] = '''sshleifer/tiny-gpt2''' UpperCamelCase :int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=SCREAMING_SNAKE_CASE_ , multi_process=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase :Optional[int] = TensorFlowBenchmark(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCAmelCase ( self ) -> List[str]: UpperCamelCase :str = '''sgugger/tiny-distilbert-classification''' UpperCamelCase :List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE_ , only_pretrain_model=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase :List[str] = TensorFlowBenchmark(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCAmelCase ( self ) -> Tuple: UpperCamelCase :Optional[int] = '''sshleifer/tiny-gpt2''' UpperCamelCase :Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase :str = TensorFlowBenchmark(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCAmelCase ( self ) -> Tuple: UpperCamelCase :List[Any] = '''sshleifer/tiny-gpt2''' UpperCamelCase :Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=SCREAMING_SNAKE_CASE_ , multi_process=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase :str = TensorFlowBenchmark(SCREAMING_SNAKE_CASE_ , [config] ) UpperCamelCase :List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :List[str] = '''sshleifer/tiny-gpt2''' UpperCamelCase :str = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase :str = TensorFlowBenchmark(SCREAMING_SNAKE_CASE_ , [config] ) UpperCamelCase :int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCAmelCase ( self ) -> str: UpperCamelCase :Dict = '''sshleifer/tiny-gpt2''' UpperCamelCase :List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase :int = TensorFlowBenchmark(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :List[str] = '''sshleifer/tiny-gpt2''' UpperCamelCase :str = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase :Optional[Any] = TensorFlowBenchmark(SCREAMING_SNAKE_CASE_ , [config] ) UpperCamelCase :Dict = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase :List[Any] = '''patrickvonplaten/t5-tiny-random''' UpperCamelCase :int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase :List[Any] = TensorFlowBenchmark(SCREAMING_SNAKE_CASE_ , configs=[config] ) UpperCamelCase :Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' ) def UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase :Optional[Any] = '''sshleifer/tiny-gpt2''' UpperCamelCase :str = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE_ , inference=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=SCREAMING_SNAKE_CASE_ , multi_process=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase :Union[str, Any] = TensorFlowBenchmark(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCAmelCase ( self ) -> List[str]: UpperCamelCase :Any = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase :Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=SCREAMING_SNAKE_CASE_ , save_to_csv=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(SCREAMING_SNAKE_CASE_ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(SCREAMING_SNAKE_CASE_ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(SCREAMING_SNAKE_CASE_ , '''env.csv''' ) , multi_process=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase :Optional[Any] = TensorFlowBenchmark(SCREAMING_SNAKE_CASE_ ) benchmark.run() self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE_ , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE_ , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE_ , '''env.csv''' ) ).exists() ) def UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase :Union[str, Any] = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(SCREAMING_SNAKE_CASE_ ): self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''sequential''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''cumulative''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''current''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase :Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(SCREAMING_SNAKE_CASE_ , '''log.txt''' ) , log_print=SCREAMING_SNAKE_CASE_ , trace_memory_line_by_line=SCREAMING_SNAKE_CASE_ , eager_mode=SCREAMING_SNAKE_CASE_ , multi_process=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase :str = TensorFlowBenchmark(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Dict = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE_ , '''log.txt''' ) ).exists() )
259
from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class UpperCAmelCase_ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ) -> Dict: UpperCamelCase :Any = parent UpperCamelCase :Dict = 13 UpperCamelCase :List[Any] = 7 UpperCamelCase :List[Any] = True UpperCamelCase :Dict = True UpperCamelCase :Union[str, Any] = True UpperCamelCase :List[str] = True UpperCamelCase :Dict = 99 UpperCamelCase :Any = 32 UpperCamelCase :Tuple = 2 UpperCamelCase :Union[str, Any] = 4 UpperCamelCase :List[str] = 37 UpperCamelCase :Dict = '''gelu''' UpperCamelCase :Dict = 0.1 UpperCamelCase :Tuple = 0.1 UpperCamelCase :Dict = 512 UpperCamelCase :str = 16 UpperCamelCase :Optional[Any] = 2 UpperCamelCase :Dict = 0.02 UpperCamelCase :Optional[int] = 3 UpperCamelCase :int = 4 UpperCamelCase :Dict = None def UpperCAmelCase ( self ) -> Tuple: UpperCamelCase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase :Optional[int] = None if self.use_input_mask: UpperCamelCase :Dict = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase :Dict = None if self.use_token_type_ids: UpperCamelCase :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase :Union[str, Any] = None UpperCamelCase :Optional[int] = None UpperCamelCase :Any = None if self.use_labels: UpperCamelCase :Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase :int = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase :Union[str, Any] = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=SCREAMING_SNAKE_CASE_ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: UpperCamelCase :Optional[Any] = TFRoFormerModel(config=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCamelCase :int = [input_ids, input_mask] UpperCamelCase :List[Any] = model(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: UpperCamelCase :List[Any] = True UpperCamelCase :Union[str, Any] = TFRoFormerForCausalLM(config=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCamelCase :Any = model(SCREAMING_SNAKE_CASE_ )['''logits'''] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: UpperCamelCase :str = TFRoFormerForMaskedLM(config=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[Any] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCamelCase :List[Any] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: UpperCamelCase :List[Any] = self.num_labels UpperCamelCase :int = TFRoFormerForSequenceClassification(config=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCamelCase :Optional[Any] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: UpperCamelCase :List[Any] = self.num_choices UpperCamelCase :Any = TFRoFormerForMultipleChoice(config=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Dict = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase :int = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase :Any = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase :List[Any] = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple: UpperCamelCase :Union[str, Any] = self.num_labels UpperCamelCase :Dict = TFRoFormerForTokenClassification(config=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCamelCase :Tuple = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: UpperCamelCase :Union[str, Any] = TFRoFormerForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Dict = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCamelCase :List[Any] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase ( self ) -> Tuple: UpperCamelCase :Optional[int] = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) :Union[str, Any] = config_and_inputs UpperCamelCase :Any = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ): """simple docstring""" UpperCamelCase_ : str =( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) UpperCamelCase_ : Tuple =( { 'feature-extraction': TFRoFormerModel, 'fill-mask': TFRoFormerForMaskedLM, 'question-answering': TFRoFormerForQuestionAnswering, 'text-classification': TFRoFormerForSequenceClassification, 'text-generation': TFRoFormerForCausalLM, 'token-classification': TFRoFormerForTokenClassification, 'zero-shot': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) UpperCamelCase_ : Tuple =False UpperCamelCase_ : Optional[Any] =False def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase :Any = TFRoFormerModelTester(self ) UpperCamelCase :Optional[int] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def UpperCAmelCase ( self ) -> List[str]: self.config_tester.run_common_tests() def UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> str: UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ ) @slow def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :Dict = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @require_tf class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :Tuple = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' ) UpperCamelCase :Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) UpperCamelCase :str = model(SCREAMING_SNAKE_CASE_ )[0] # TODO Replace vocab size UpperCamelCase :Tuple = 5_0000 UpperCamelCase :Optional[Any] = [1, 6, vocab_size] self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. UpperCamelCase :int = tf.constant( [ [ [-0.1205_3341, -1.026_4901, 0.2922_1946], [-1.513_3783, 0.19_7433, 0.1519_0607], [-5.013_5403, -3.90_0256, -0.8403_8764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) @require_tf class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Optional[int] =1E-4 def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :str = tf.constant([[4, 10]] ) UpperCamelCase :List[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) UpperCamelCase :str = emba(input_ids.shape ) UpperCamelCase :List[str] = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=self.tolerance ) def UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase :Dict = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) UpperCamelCase :Dict = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 ) emba([2, 16, 512] ) UpperCamelCase :Any = emba.weight[:3, :5] tf.debugging.assert_near(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=self.tolerance ) @require_tf class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" UpperCamelCase_ : List[Any] =1E-4 def UpperCAmelCase ( self ) -> List[str]: # 2,12,16,64 UpperCamelCase :List[Any] = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 UpperCamelCase :List[Any] = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 UpperCamelCase :List[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) UpperCamelCase :int = embed_positions([2, 16, 768] )[None, None, :, :] UpperCamelCase , UpperCamelCase :List[str] = TFRoFormerSelfAttention.apply_rotary_position_embeddings( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) UpperCamelCase :Optional[int] = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , SCREAMING_SNAKE_CASE_ , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , SCREAMING_SNAKE_CASE_ , atol=self.tolerance )
259
1
'''simple docstring''' import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated __a: List[Any] = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ __a: List[Any] = "https://storage.googleapis.com/cvdf-datasets/mnist/" def __UpperCamelCase ( UpperCAmelCase ): lowercase__ : str = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=UpperCAmelCase )[0] @deprecated(UpperCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __UpperCamelCase ( UpperCAmelCase ): print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=UpperCAmelCase ) as bytestream: lowercase__ : int = _readaa(UpperCAmelCase ) if magic != 2051: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) lowercase__ : Tuple = _readaa(UpperCAmelCase ) lowercase__ : Optional[Any] = _readaa(UpperCAmelCase ) lowercase__ : int = _readaa(UpperCAmelCase ) lowercase__ : List[str] = bytestream.read(rows * cols * num_images ) lowercase__ : Optional[int] = numpy.frombuffer(UpperCAmelCase , dtype=numpy.uinta ) lowercase__ : Optional[Any] = data.reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , 1 ) return data @deprecated(UpperCAmelCase , '''Please use tf.one_hot on tensors.''' ) def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ): lowercase__ : int = labels_dense.shape[0] lowercase__ : Any = numpy.arange(UpperCAmelCase ) * num_classes lowercase__ : List[str] = numpy.zeros((num_labels, num_classes) ) lowercase__ : Dict = 1 return labels_one_hot @deprecated(UpperCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=10 ): print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=UpperCAmelCase ) as bytestream: lowercase__ : List[Any] = _readaa(UpperCAmelCase ) if magic != 2049: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) lowercase__ : List[str] = _readaa(UpperCAmelCase ) lowercase__ : Optional[int] = bytestream.read(UpperCAmelCase ) lowercase__ : Dict = numpy.frombuffer(UpperCAmelCase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(UpperCAmelCase , UpperCAmelCase ) return labels class UpperCAmelCase : '''simple docstring''' @deprecated( __A , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=dtypes.floataa , __lowerCAmelCase=True , __lowerCAmelCase=None , ) -> List[Any]: lowercase__ : Optional[Any] = random_seed.get_seed(__A ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) lowercase__ : Optional[Any] = dtypes.as_dtype(__A ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype ) if fake_data: lowercase__ : Any = 10000 lowercase__ : str = one_hot else: assert ( images.shape[0] == labels.shape[0] ), F"""images.shape: {images.shape} labels.shape: {labels.shape}""" lowercase__ : Tuple = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 lowercase__ : Optional[Any] = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. lowercase__ : Tuple = images.astype(numpy.floataa ) lowercase__ : Tuple = numpy.multiply(__A , 1.0 / 255.0 ) lowercase__ : str = images lowercase__ : Union[str, Any] = labels lowercase__ : int = 0 lowercase__ : Any = 0 @property def _lowerCAmelCase( self ) -> Tuple: return self._images @property def _lowerCAmelCase( self ) -> List[str]: return self._labels @property def _lowerCAmelCase( self ) -> List[str]: return self._num_examples @property def _lowerCAmelCase( self ) -> Any: return self._epochs_completed def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=True ) -> Union[str, Any]: if fake_data: lowercase__ : int = [1] * 784 lowercase__ : Optional[int] = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(__A )], [fake_label for _ in range(__A )], ) lowercase__ : Any = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: lowercase__ : Dict = numpy.arange(self._num_examples ) numpy.random.shuffle(__A ) lowercase__ : Optional[Any] = self.images[perma] lowercase__ : str = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch lowercase__ : List[str] = self._num_examples - start lowercase__ : Tuple = self._images[start : self._num_examples] lowercase__ : List[str] = self._labels[start : self._num_examples] # Shuffle the data if shuffle: lowercase__ : Optional[Any] = numpy.arange(self._num_examples ) numpy.random.shuffle(__A ) lowercase__ : Any = self.images[perm] lowercase__ : Any = self.labels[perm] # Start next epoch lowercase__ : Any = 0 lowercase__ : List[Any] = batch_size - rest_num_examples lowercase__ : str = self._index_in_epoch lowercase__ : List[Any] = self._images[start:end] lowercase__ : Dict = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size lowercase__ : List[Any] = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(UpperCAmelCase , '''Please write your own downloading logic.''' ) def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): if not gfile.Exists(UpperCAmelCase ): gfile.MakeDirs(UpperCAmelCase ) lowercase__ : Tuple = os.path.join(UpperCAmelCase , UpperCAmelCase ) if not gfile.Exists(UpperCAmelCase ): urllib.request.urlretrieve(UpperCAmelCase , UpperCAmelCase ) # noqa: S310 with gfile.GFile(UpperCAmelCase ) as f: lowercase__ : List[str] = f.size() print('''Successfully downloaded''' , UpperCAmelCase , UpperCAmelCase , '''bytes.''' ) return filepath @deprecated( UpperCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' ) def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=dtypes.floataa , UpperCAmelCase=True , UpperCAmelCase=5000 , UpperCAmelCase=None , UpperCAmelCase=DEFAULT_SOURCE_URL , ): if fake_data: def fake(): return _DataSet( [] , [] , fake_data=UpperCAmelCase , one_hot=UpperCAmelCase , dtype=UpperCAmelCase , seed=UpperCAmelCase ) lowercase__ : Tuple = fake() lowercase__ : int = fake() lowercase__ : Union[str, Any] = fake() return _Datasets(train=UpperCAmelCase , validation=UpperCAmelCase , test=UpperCAmelCase ) if not source_url: # empty string check lowercase__ : Union[str, Any] = DEFAULT_SOURCE_URL lowercase__ : Optional[int] = '''train-images-idx3-ubyte.gz''' lowercase__ : str = '''train-labels-idx1-ubyte.gz''' lowercase__ : List[Any] = '''t10k-images-idx3-ubyte.gz''' lowercase__ : Optional[int] = '''t10k-labels-idx1-ubyte.gz''' lowercase__ : Any = _maybe_download( UpperCAmelCase , UpperCAmelCase , source_url + train_images_file ) with gfile.Open(UpperCAmelCase , '''rb''' ) as f: lowercase__ : Any = _extract_images(UpperCAmelCase ) lowercase__ : List[Any] = _maybe_download( UpperCAmelCase , UpperCAmelCase , source_url + train_labels_file ) with gfile.Open(UpperCAmelCase , '''rb''' ) as f: lowercase__ : List[str] = _extract_labels(UpperCAmelCase , one_hot=UpperCAmelCase ) lowercase__ : Optional[int] = _maybe_download( UpperCAmelCase , UpperCAmelCase , source_url + test_images_file ) with gfile.Open(UpperCAmelCase , '''rb''' ) as f: lowercase__ : Optional[int] = _extract_images(UpperCAmelCase ) lowercase__ : Dict = _maybe_download( UpperCAmelCase , UpperCAmelCase , source_url + test_labels_file ) with gfile.Open(UpperCAmelCase , '''rb''' ) as f: lowercase__ : Optional[int] = _extract_labels(UpperCAmelCase , one_hot=UpperCAmelCase ) if not 0 <= validation_size <= len(UpperCAmelCase ): lowercase__ : List[str] = ( '''Validation size should be between 0 and ''' F"""{len(UpperCAmelCase )}. Received: {validation_size}.""" ) raise ValueError(UpperCAmelCase ) lowercase__ : Dict = train_images[:validation_size] lowercase__ : Optional[Any] = train_labels[:validation_size] lowercase__ : Union[str, Any] = train_images[validation_size:] lowercase__ : Optional[int] = train_labels[validation_size:] lowercase__ : Tuple = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed} lowercase__ : int = _DataSet(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) lowercase__ : List[str] = _DataSet(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) lowercase__ : Dict = _DataSet(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) return _Datasets(train=UpperCAmelCase , validation=UpperCAmelCase , test=UpperCAmelCase )
361
'''simple docstring''' class UpperCAmelCase : '''simple docstring''' def __init__( self ) -> List[str]: lowercase__ : Dict = {} def _lowerCAmelCase( self ) -> None: print(self.vertex ) for i in self.vertex: print(__lowerCAmelCase , ''' -> ''' , ''' -> '''.join([str(__lowerCAmelCase ) for j in self.vertex[i]] ) ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> None: # check if vertex is already present, if from_vertex in self.vertex: self.vertex[from_vertex].append(__lowerCAmelCase ) else: # else make a new vertex lowercase__ : Union[str, Any] = [to_vertex] def _lowerCAmelCase( self ) -> None: # visited array for storing already visited nodes lowercase__ : str = [False] * len(self.vertex ) # call the recursive helper function for i in range(len(self.vertex ) ): if not visited[i]: self.dfs_recursive(__lowerCAmelCase , __lowerCAmelCase ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> None: # mark start vertex as visited lowercase__ : List[str] = True print(__lowerCAmelCase , end=''' ''' ) # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: self.dfs_recursive(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": __a: Optional[Any] = Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() print("""DFS:""") g.dfs() # OUTPUT: # 0 -> 1 -> 2 # 1 -> 2 # 2 -> 0 -> 3 # 3 -> 3 # DFS: # 0 1 2 3
214
0
def a ( snake_case__: int ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): raise TypeError('''Input value must be an \'int\' type''' ) lowercase_ = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
30
import argparse import os import re __a = 'src/transformers/models/auto' # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict __a = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict') # re pattern that matches identifiers in mappings __a = re.compile(r'\s*\(\s*"(\S[^"]+)"') def a ( snake_case__: str , snake_case__: bool = False ): '''simple docstring''' with open(snake_case__ , '''r''' , encoding='''utf-8''' ) as f: lowercase_ = f.read() lowercase_ = content.split('''\n''' ) lowercase_ = [] lowercase_ = 0 while line_idx < len(snake_case__ ): if _re_intro_mapping.search(lines[line_idx] ) is not None: lowercase_ = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(''' ''' * indent + '''(''' ): new_lines.append(lines[line_idx] ) line_idx += 1 lowercase_ = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": lowercase_ = line_idx while not lines[line_idx].startswith(''' ''' * indent + ''')''' ): line_idx += 1 blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers lowercase_ = sorted(snake_case__ , key=lambda snake_case__ : _re_identifier.search(snake_case__ ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: f.write('''\n'''.join(snake_case__ ) ) elif "\n".join(snake_case__ ) != content: return True def a ( snake_case__: bool = False ): '''simple docstring''' lowercase_ = [os.path.join(snake_case__ , snake_case__ ) for f in os.listdir(snake_case__ ) if f.endswith('''.py''' )] lowercase_ = [sort_auto_mapping(snake_case__ , overwrite=snake_case__ ) for fname in fnames] if not overwrite and any(snake_case__ ): lowercase_ = [f for f, d in zip(snake_case__ , snake_case__ ) if d] raise ValueError( F'''The following files have auto mappings that need sorting: {', '.join(snake_case__ )}. Run `make style` to fix''' ''' this.''' ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') __a = parser.parse_args() sort_all_auto_mappings(not args.check_only)
30
1
'''simple docstring''' import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class __UpperCamelCase : def __init__( self, lowerCAmelCase = "cpu", lowerCAmelCase = "openai/clip-vit-large-patch14" ): """simple docstring""" lowerCamelCase_ =device lowerCamelCase_ =CLIPTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] lowerCamelCase_ =[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] lowerCamelCase_ =torchvision.transforms.Normalize(self.image_mean, self.image_std ) lowerCamelCase_ =torchvision.transforms.Resize(224 ) lowerCamelCase_ =torchvision.transforms.CenterCrop(224 ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.resize(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =self.center_crop(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =self.normalize(_SCREAMING_SNAKE_CASE ) return images def __call__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.tokenizer(text=_SCREAMING_SNAKE_CASE, **_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =self.preprocess_img(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ ={key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class __UpperCamelCase ( nn.Module ): def __init__( self, lowerCAmelCase=10, lowerCAmelCase=0.0_1, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase="image", lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=False, ): """simple docstring""" super().__init__() lowerCamelCase_ =None lowerCamelCase_ =device if device else get_device() if vqgan: lowerCamelCase_ =vqgan else: lowerCamelCase_ =load_vqgan(self.device, conf_path=_SCREAMING_SNAKE_CASE, ckpt_path=_SCREAMING_SNAKE_CASE ) self.vqgan.eval() if clip: lowerCamelCase_ =clip else: lowerCamelCase_ =CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' ) self.clip.to(self.device ) lowerCamelCase_ =ProcessorGradientFlow(device=self.device ) lowerCamelCase_ =iterations lowerCamelCase_ =lr lowerCamelCase_ =log lowerCamelCase_ =make_grid lowerCamelCase_ =return_val lowerCamelCase_ =quantize lowerCamelCase_ =self.vqgan.decoder.z_shape def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=5, lowerCAmelCase=True ): """simple docstring""" lowerCamelCase_ =[] if output_path is None: lowerCamelCase_ ="./animation.gif" if input_path is None: lowerCamelCase_ =self.save_path lowerCamelCase_ =sorted(glob(input_path + '''/*''' ) ) if not len(_SCREAMING_SNAKE_CASE ): raise ValueError( '''No images found in save path, aborting (did you pass save_intermediate=True to the generate''' ''' function?)''' ) if len(_SCREAMING_SNAKE_CASE ) == 1: print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' ) lowerCamelCase_ =total_duration / len(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =[frame_duration] * len(_SCREAMING_SNAKE_CASE ) if extend_frames: lowerCamelCase_ =1.5 lowerCamelCase_ =3 for file_name in paths: if file_name.endswith('''.png''' ): images.append(imageio.imread(_SCREAMING_SNAKE_CASE ) ) imageio.mimsave(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, duration=_SCREAMING_SNAKE_CASE ) print(f'''gif saved to {output_path}''' ) def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=None ): """simple docstring""" if not (path or img): raise ValueError('''Input either path or tensor''' ) if img is not None: raise NotImplementedError lowerCamelCase_ =preprocess(Image.open(_SCREAMING_SNAKE_CASE ), target_image_size=256 ).to(self.device ) lowerCamelCase_ =preprocess_vqgan(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =self.vqgan.encode(_SCREAMING_SNAKE_CASE ) return z def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.latent.detach().requires_grad_() lowerCamelCase_ =base_latent + transform_vector if self.quantize: lowerCamelCase_ =self.vqgan.quantize(_SCREAMING_SNAKE_CASE ) else: lowerCamelCase_ =trans_latent return self.vqgan.decode(_SCREAMING_SNAKE_CASE ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=None ): """simple docstring""" lowerCamelCase_ =self.clip_preprocessor(text=_SCREAMING_SNAKE_CASE, images=_SCREAMING_SNAKE_CASE, return_tensors='''pt''', padding=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =self.clip(**_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =clip_outputs.logits_per_image if weights is not None: lowerCamelCase_ =similarity_logits * weights return similarity_logits.sum() def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self._get_clip_similarity(pos_prompts['''prompts'''], _SCREAMING_SNAKE_CASE, weights=(1 / pos_prompts['''weights''']) ) if neg_prompts: lowerCamelCase_ =self._get_clip_similarity(neg_prompts['''prompts'''], _SCREAMING_SNAKE_CASE, weights=neg_prompts['''weights'''] ) else: lowerCamelCase_ =torch.tensor([1], device=self.device ) lowerCamelCase_ =-torch.log(_SCREAMING_SNAKE_CASE ) + torch.log(_SCREAMING_SNAKE_CASE ) return loss def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =torch.randn_like(self.latent, requires_grad=_SCREAMING_SNAKE_CASE, device=self.device ) lowerCamelCase_ =torch.optim.Adam([vector], lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() lowerCamelCase_ =self._add_vector(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =loop_post_process(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =self._get_CLIP_loss(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) print('''CLIP loss''', _SCREAMING_SNAKE_CASE ) if self.log: wandb.log({'''CLIP Loss''': clip_loss} ) clip_loss.backward(retain_graph=_SCREAMING_SNAKE_CASE ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" wandb.init(reinit=_SCREAMING_SNAKE_CASE, project='''face-editor''' ) wandb.config.update({'''Positive Prompts''': positive_prompts} ) wandb.config.update({'''Negative Prompts''': negative_prompts} ) wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} ) if image_path: lowerCamelCase_ =Image.open(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =image.resize((256, 256) ) wandb.log('''Original Image''', wandb.Image(_SCREAMING_SNAKE_CASE ) ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" if not prompts: return [] lowerCamelCase_ =[] lowerCamelCase_ =[] if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ): lowerCamelCase_ =[prompt.strip() for prompt in prompts.split('''|''' )] for prompt in prompts: if isinstance(_SCREAMING_SNAKE_CASE, (tuple, list) ): lowerCamelCase_ =prompt[0] lowerCamelCase_ =float(prompt[1] ) elif ":" in prompt: lowerCamelCase_ =prompt.split(''':''' ) lowerCamelCase_ =float(_SCREAMING_SNAKE_CASE ) else: lowerCamelCase_ =prompt lowerCamelCase_ =1.0 processed_prompts.append(_SCREAMING_SNAKE_CASE ) weights.append(_SCREAMING_SNAKE_CASE ) return { "prompts": processed_prompts, "weights": torch.tensor(_SCREAMING_SNAKE_CASE, device=self.device ), } def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=None, ): """simple docstring""" if image_path: lowerCamelCase_ =self._get_latent(_SCREAMING_SNAKE_CASE ) else: lowerCamelCase_ =torch.randn(self.latent_dim, device=self.device ) if self.log: self._init_logging(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) assert pos_prompts, "You must provide at least one positive prompt." lowerCamelCase_ =self.process_prompts(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =self.process_prompts(_SCREAMING_SNAKE_CASE ) if save_final and save_path is None: lowerCamelCase_ =os.path.join('''./outputs/''', '''_'''.join(pos_prompts['''prompts'''] ) ) if not os.path.exists(_SCREAMING_SNAKE_CASE ): os.makedirs(_SCREAMING_SNAKE_CASE ) else: lowerCamelCase_ =save_path + "_" + get_timestamp() os.makedirs(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =save_path lowerCamelCase_ =self.vqgan.decode(self.latent )[0] if show_intermediate: print('''Original Image''' ) show_pil(custom_to_pil(_SCREAMING_SNAKE_CASE ) ) lowerCamelCase_ =loop_post_process(_SCREAMING_SNAKE_CASE ) for iter, transformed_img in enumerate(self._optimize_CLIP(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) ): if show_intermediate: show_pil(_SCREAMING_SNAKE_CASE ) if save_intermediate: transformed_img.save(os.path.join(self.save_path, f'''iter_{iter:03d}.png''' ) ) if self.log: wandb.log({'''Image''': wandb.Image(_SCREAMING_SNAKE_CASE )} ) if show_final: show_pil(_SCREAMING_SNAKE_CASE ) if save_final: transformed_img.save(os.path.join(self.save_path, f'''iter_{iter:03d}_final.png''' ) )
363
'''simple docstring''' from ..utils import DummyObject, requires_backends class __UpperCamelCase ( metaclass=lowerCamelCase__ ): lowercase : str =['speech'] def __init__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" requires_backends(self, ['''speech'''] ) class __UpperCamelCase ( metaclass=lowerCamelCase__ ): lowercase : Any =['speech'] def __init__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" requires_backends(self, ['''speech'''] )
6
0
"""simple docstring""" class _UpperCAmelCase : def __init__( self : int , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : Union[str, Any] ): snake_case_ : str = name snake_case_ : List[Any] = value snake_case_ : Union[str, Any] = weight def __repr__( self : Union[str, Any] ): return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})" def _snake_case ( self : Tuple ): return self.value def _snake_case ( self : Any ): return self.name def _snake_case ( self : Any ): return self.weight def _snake_case ( self : List[Any] ): return self.value / self.weight def __lowercase ( _a , _a , _a ): snake_case_ : Union[str, Any] = [] for i in range(len(_a ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def __lowercase ( _a , _a , _a ): snake_case_ : Union[str, Any] = sorted(_a , key=_a , reverse=_a ) snake_case_ : List[Any] = [] snake_case_, snake_case_ : Any = 0.0, 0.0 for i in range(len(_a ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def __lowercase ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
264
"""simple docstring""" import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def __lowercase ( _a="" ): snake_case_ : List[str] = tempfile.mkdtemp() return os.path.join(_a , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class _UpperCAmelCase ( unittest.TestCase): def _snake_case ( self : str ): snake_case_ : int = torch.rand(12 , dtype=torch.floataa ) - 0.5 snake_case_ : Optional[int] = AgentAudio(lowercase_ ) snake_case_ : List[str] = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1E-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(lowercase_ ) ) # Ensure that the file contains the same value as the original tensor snake_case_, snake_case_ : int = sf.read(lowercase_ ) self.assertTrue(torch.allclose(lowercase_ , torch.tensor(lowercase_ ) , atol=1E-4 ) ) def _snake_case ( self : Optional[int] ): snake_case_ : Any = torch.rand(12 , dtype=torch.floataa ) - 0.5 snake_case_ : List[str] = get_new_path(suffix='''.wav''' ) sf.write(lowercase_ , lowercase_ , 16000 ) snake_case_ : Tuple = AgentAudio(lowercase_ ) self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1E-4 ) ) self.assertEqual(agent_type.to_string() , lowercase_ ) @require_vision @require_torch class _UpperCAmelCase ( unittest.TestCase): def _snake_case ( self : Tuple ): snake_case_ : List[Any] = torch.randint(0 , 256 , (64, 64, 3) ) snake_case_ : str = AgentImage(lowercase_ ) snake_case_ : Union[str, Any] = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(lowercase_ , agent_type._tensor , atol=1E-4 ) ) self.assertIsInstance(agent_type.to_raw() , Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowercase_ ) ) def _snake_case ( self : str ): snake_case_ : Any = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' snake_case_ : Optional[int] = Image.open(lowercase_ ) snake_case_ : Tuple = AgentImage(lowercase_ ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowercase_ ) ) def _snake_case ( self : str ): snake_case_ : int = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' snake_case_ : Dict = Image.open(lowercase_ ) snake_case_ : List[str] = AgentImage(lowercase_ ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowercase_ ) ) class _UpperCAmelCase ( unittest.TestCase): def _snake_case ( self : Any ): snake_case_ : Tuple = '''Hey!''' snake_case_ : Optional[Any] = AgentText(lowercase_ ) self.assertEqual(lowercase_ , agent_type.to_string() ) self.assertEqual(lowercase_ , agent_type.to_raw() ) self.assertEqual(lowercase_ , lowercase_ )
264
1
"""simple docstring""" def lowerCamelCase_ (UpperCamelCase__ : list[int] ): if not numbers: return 0 if not isinstance(UpperCamelCase__ , (list, tuple) ) or not all( isinstance(UpperCamelCase__ , UpperCamelCase__ ) for number in numbers ): raise ValueError('''numbers must be an iterable of integers''' ) _UpperCAmelCase : Union[str, Any] = numbers[0] for i in range(1 , len(UpperCamelCase__ ) ): # update the maximum and minimum subarray products _UpperCAmelCase : Tuple = numbers[i] if number < 0: _UpperCAmelCase , _UpperCAmelCase : Tuple = min_till_now, max_till_now _UpperCAmelCase : Union[str, Any] = max(UpperCamelCase__ , max_till_now * number ) _UpperCAmelCase : Dict = min(UpperCamelCase__ , min_till_now * number ) # update the maximum product found till now _UpperCAmelCase : Any = max(UpperCamelCase__ , UpperCamelCase__ ) return max_prod
68
"""simple docstring""" from __future__ import annotations def lowerCamelCase_ (UpperCamelCase__ : list[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int ): if (direction == 1 and array[indexa] > array[indexa]) or ( direction == 0 and array[indexa] < array[indexa] ): _UpperCAmelCase , _UpperCAmelCase : int = array[indexa], array[indexa] def lowerCamelCase_ (UpperCamelCase__ : list[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int ): if length > 1: _UpperCAmelCase : str = int(length / 2 ) for i in range(UpperCamelCase__ , low + middle ): comp_and_swap(UpperCamelCase__ , UpperCamelCase__ , i + middle , UpperCamelCase__ ) bitonic_merge(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) bitonic_merge(UpperCamelCase__ , low + middle , UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ (UpperCamelCase__ : list[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int ): if length > 1: _UpperCAmelCase : str = int(length / 2 ) bitonic_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , 1 ) bitonic_sort(UpperCamelCase__ , low + middle , UpperCamelCase__ , 0 ) bitonic_merge(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if __name__ == "__main__": _lowerCAmelCase :Any = input('Enter numbers separated by a comma:\n').strip() _lowerCAmelCase :Tuple = [int(item.strip()) for item in user_input.split(',')] bitonic_sort(unsorted, 0, len(unsorted), 1) print('\nSorted array in ascending order is: ', end='') print(*unsorted, sep=', ') bitonic_merge(unsorted, 0, len(unsorted), 0) print('Sorted array in descending order is: ', end='') print(*unsorted, sep=', ')
68
1
import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class _lowercase ( ctypes.Structure): """simple docstring""" A__ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] def lowercase_ ( ): """simple docstring""" if os.name == "nt": lowerCamelCase__ : Any = CursorInfo() lowerCamelCase__ : Any = ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(_A , ctypes.byref(_A ) ) lowerCamelCase__ : int = False ctypes.windll.kernelaa.SetConsoleCursorInfo(_A , ctypes.byref(_A ) ) elif os.name == "posix": sys.stdout.write("\033[?25l" ) sys.stdout.flush() def lowercase_ ( ): """simple docstring""" if os.name == "nt": lowerCamelCase__ : List[Any] = CursorInfo() lowerCamelCase__ : Dict = ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(_A , ctypes.byref(_A ) ) lowerCamelCase__ : Optional[int] = True ctypes.windll.kernelaa.SetConsoleCursorInfo(_A , ctypes.byref(_A ) ) elif os.name == "posix": sys.stdout.write("\033[?25h" ) sys.stdout.flush() @contextmanager def lowercase_ ( ): """simple docstring""" try: hide_cursor() yield finally: show_cursor()
184
from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge A : Optional[int] = [ "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the" " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe" " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.", "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal" " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's" " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the" " body.", "Amnesty International releases its annual report on the death penalty. The report catalogs the use of" " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the" " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital" " punishment.", ] A : List[Any] = [ "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ." " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz" " had informed his Lufthansa training school of an episode of severe depression, airline says .", "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ." " Israel and the United States opposed the move, which could open the door to war crimes investigations against" " Israelis .", "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to" " death . Organization claims that governments around the world are using the threat of terrorism to advance" " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death" " sentences up by 28% .", ] def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : Dict = calculate_rouge(_A , _A , bootstrap_aggregation=_A , rouge_keys=["rouge2", "rougeL"] ) assert isinstance(_A , _A ) lowerCamelCase__ : List[Any] = calculate_rouge(_A , _A , bootstrap_aggregation=_A , rouge_keys=["rouge2"] ) assert ( pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean() ) def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : Any = "rougeLsum" lowerCamelCase__ : List[str] = calculate_rouge(_A , _A , newline_sep=_A , rouge_keys=[k] )[k] lowerCamelCase__ : str = calculate_rouge(_A , _A , newline_sep=_A , rouge_keys=[k] )[k] assert score > score_no_sep def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : int = ["rouge1", "rouge2", "rougeL"] lowerCamelCase__ : Union[str, Any] = calculate_rouge(_A , _A , newline_sep=_A , rouge_keys=_A ) lowerCamelCase__ : Any = calculate_rouge(_A , _A , newline_sep=_A , rouge_keys=_A ) assert score_sep == score_no_sep def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : Optional[Any] = [ "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.", "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .", ] lowerCamelCase__ : Tuple = [ "Margot Frank, died in 1945, a month earlier than previously thought.", "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of" " the final seconds on board Flight 9525.", ] assert calculate_rouge(_A , _A , newline_sep=_A ) == calculate_rouge(_A , _A , newline_sep=_A ) def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : List[str] = [ "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" " ] lowerCamelCase__ : str = [ " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ." ] lowerCamelCase__ : Union[str, Any] = calculate_rouge(_A , _A , rouge_keys=["rougeLsum"] , newline_sep=_A )["rougeLsum"] lowerCamelCase__ : List[str] = calculate_rouge(_A , _A , rouge_keys=["rougeLsum"] )["rougeLsum"] assert new_score > prev_score def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : Tuple = Path("examples/seq2seq/test_data/wmt_en_ro" ) lowerCamelCase__ : Any = calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) ) assert isinstance(_A , _A ) lowerCamelCase__ : str = calculate_rouge_path( data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=_A ) assert isinstance(_A , _A )
184
1
import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __lowercase ( unittest.TestCase ): """simple docstring""" def __A ( self ) -> List[str]: '''simple docstring''' super().tearDown() gc.collect() def __A ( self ) -> Optional[int]: '''simple docstring''' lowerCamelCase , lowerCamelCase = FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-canny""" , from_pt=A , dtype=jnp.bfloataa ) lowerCamelCase , lowerCamelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=A , from_pt=A , dtype=jnp.bfloataa ) lowerCamelCase = controlnet_params lowerCamelCase = """bird""" lowerCamelCase = jax.device_count() lowerCamelCase = pipe.prepare_text_inputs([prompts] * num_samples ) lowerCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ) lowerCamelCase = pipe.prepare_image_inputs([canny_image] * num_samples ) lowerCamelCase = jax.random.PRNGKey(0 ) lowerCamelCase = jax.random.split(A , jax.device_count() ) lowerCamelCase = replicate(A ) lowerCamelCase = shard(A ) lowerCamelCase = shard(A ) lowerCamelCase = pipe( prompt_ids=A , image=A , params=A , prng_seed=A , num_inference_steps=50 , jit=A , ).images assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3) lowerCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowerCamelCase = images[0, 2_53:2_56, 2_53:2_56, -1] lowerCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowerCamelCase = jnp.array( [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] ) print(F'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def __A ( self ) -> Dict: '''simple docstring''' lowerCamelCase , lowerCamelCase = FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-openpose""" , from_pt=A , dtype=jnp.bfloataa ) lowerCamelCase , lowerCamelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=A , from_pt=A , dtype=jnp.bfloataa ) lowerCamelCase = controlnet_params lowerCamelCase = """Chef in the kitchen""" lowerCamelCase = jax.device_count() lowerCamelCase = pipe.prepare_text_inputs([prompts] * num_samples ) lowerCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" ) lowerCamelCase = pipe.prepare_image_inputs([pose_image] * num_samples ) lowerCamelCase = jax.random.PRNGKey(0 ) lowerCamelCase = jax.random.split(A , jax.device_count() ) lowerCamelCase = replicate(A ) lowerCamelCase = shard(A ) lowerCamelCase = shard(A ) lowerCamelCase = pipe( prompt_ids=A , image=A , params=A , prng_seed=A , num_inference_steps=50 , jit=A , ).images assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3) lowerCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowerCamelCase = images[0, 2_53:2_56, 2_53:2_56, -1] lowerCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowerCamelCase = jnp.array( [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] ) print(F'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
66
import math import tensorflow as tf from packaging import version def __lowerCamelCase ( lowerCamelCase__ : Optional[Any] ): '''simple docstring''' lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ ) lowerCamelCase = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) )) return x * cdf def __lowerCamelCase ( lowerCamelCase__ : Dict ): '''simple docstring''' lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ ) lowerCamelCase = tf.cast(math.pi , x.dtype ) lowerCamelCase = tf.cast(0.0_4_4_7_1_5 , x.dtype ) lowerCamelCase = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(lowerCamelCase__ , 3 )) )) return x * cdf def __lowerCamelCase ( lowerCamelCase__ : Any ): '''simple docstring''' lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ ) return x * tf.tanh(tf.math.softplus(lowerCamelCase__ ) ) def __lowerCamelCase ( lowerCamelCase__ : List[Any] ): '''simple docstring''' lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ ) lowerCamelCase = tf.cast(0.0_4_4_7_1_5 , x.dtype ) lowerCamelCase = tf.cast(0.7_9_7_8_8_4_5_6_0_8 , x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def __lowerCamelCase ( lowerCamelCase__ : str ): '''simple docstring''' lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ ) lowerCamelCase = tf.cast(1.7_0_2 , x.dtype ) return x * tf.math.sigmoid(coeff * x ) def __lowerCamelCase ( lowerCamelCase__ : List[str] ): '''simple docstring''' return tf.clip_by_value(_gelu(lowerCamelCase__ ) , -10 , 10 ) def __lowerCamelCase ( lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int]=-1 ): '''simple docstring''' lowerCamelCase , lowerCamelCase = tf.split(lowerCamelCase__ , 2 , axis=lowerCamelCase__ ) return a * tf.math.sigmoid(lowerCamelCase__ ) if version.parse(tf.version.VERSION) >= version.parse("2.4"): def __lowerCamelCase ( lowerCamelCase__ : List[str] ): '''simple docstring''' return tf.keras.activations.gelu(lowerCamelCase__ , approximate=lowerCamelCase__ ) UpperCAmelCase : Union[str, Any] = tf.keras.activations.gelu UpperCAmelCase : Optional[Any] = approximate_gelu_wrap else: UpperCAmelCase : List[Any] = _gelu UpperCAmelCase : str = _gelu_new UpperCAmelCase : Union[str, Any] = { "gelu": gelu, "gelu_10": gelu_aa, "gelu_fast": gelu_fast, "gelu_new": gelu_new, "glu": glu, "mish": mish, "quick_gelu": quick_gelu, "relu": tf.keras.activations.relu, "sigmoid": tf.keras.activations.sigmoid, "silu": tf.keras.activations.swish, "swish": tf.keras.activations.swish, "tanh": tf.keras.activations.tanh, } def __lowerCamelCase ( lowerCamelCase__ : List[Any] ): '''simple docstring''' if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(f'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
66
1
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class _snake_case ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[Any] = tempfile.mkdtemp() a :List[str] = BlipImageProcessor() a :List[Any] = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' ) a :int = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) a :str = InstructBlipProcessor(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self , **_lowerCamelCase ): return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ).tokenizer def SCREAMING_SNAKE_CASE__ ( self , **_lowerCamelCase ): return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ).image_processor def SCREAMING_SNAKE_CASE__ ( self , **_lowerCamelCase ): return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ).qformer_tokenizer def SCREAMING_SNAKE_CASE__ ( self ): shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self ): a :List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] a :Tuple = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def SCREAMING_SNAKE_CASE__ ( self ): a :Union[str, Any] = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname ) a :List[str] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) a :Tuple = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 ) a :Union[str, Any] = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowerCamelCase ) self.assertIsInstance(processor.qformer_tokenizer , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :Tuple = self.get_image_processor() a :Tuple = self.get_tokenizer() a :Union[str, Any] = self.get_qformer_tokenizer() a :Optional[Any] = InstructBlipProcessor( tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase , qformer_tokenizer=_lowerCamelCase ) a :Optional[int] = self.prepare_image_inputs() a :Optional[int] = image_processor(_lowerCamelCase , return_tensors='''np''' ) a :int = processor(images=_lowerCamelCase , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def SCREAMING_SNAKE_CASE__ ( self ): a :Dict = self.get_image_processor() a :Any = self.get_tokenizer() a :Dict = self.get_qformer_tokenizer() a :Optional[Any] = InstructBlipProcessor( tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase , qformer_tokenizer=_lowerCamelCase ) a :str = '''lower newer''' a :Any = processor(text=_lowerCamelCase ) a :List[Any] = tokenizer(_lowerCamelCase , return_token_type_ids=_lowerCamelCase ) a :Optional[int] = qformer_tokenizer(_lowerCamelCase , return_token_type_ids=_lowerCamelCase ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] ) def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = self.get_image_processor() a :str = self.get_tokenizer() a :Union[str, Any] = self.get_qformer_tokenizer() a :Tuple = InstructBlipProcessor( tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase , qformer_tokenizer=_lowerCamelCase ) a :List[Any] = '''lower newer''' a :Union[str, Any] = self.prepare_image_inputs() a :Tuple = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual( list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , ) # test if it raises when no input is passed with pytest.raises(_lowerCamelCase ): processor() def SCREAMING_SNAKE_CASE__ ( self ): a :List[str] = self.get_image_processor() a :Union[str, Any] = self.get_tokenizer() a :int = self.get_qformer_tokenizer() a :Optional[Any] = InstructBlipProcessor( tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase , qformer_tokenizer=_lowerCamelCase ) a :Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] a :Optional[Any] = processor.batch_decode(_lowerCamelCase ) a :List[str] = tokenizer.batch_decode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :Dict = self.get_image_processor() a :List[Any] = self.get_tokenizer() a :str = self.get_qformer_tokenizer() a :List[str] = InstructBlipProcessor( tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase , qformer_tokenizer=_lowerCamelCase ) a :int = '''lower newer''' a :Tuple = self.prepare_image_inputs() a :Dict = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual( list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
94
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case_ = { '''configuration_instructblip''': [ '''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''InstructBlipConfig''', '''InstructBlipQFormerConfig''', '''InstructBlipVisionConfig''', ], '''processing_instructblip''': ['''InstructBlipProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ '''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''InstructBlipQFormerModel''', '''InstructBlipPreTrainedModel''', '''InstructBlipForConditionalGeneration''', '''InstructBlipVisionModel''', ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys snake_case_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
214
0
'''simple docstring''' import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class a : def __init__( self : Optional[int] , lowercase_ : str , lowercase_ : int=13 , lowercase_ : List[str]=7 , lowercase_ : Union[str, Any]=True , lowercase_ : Any=True , lowercase_ : int=99 , lowercase_ : str=32 , lowercase_ : str=5 , lowercase_ : List[Any]=4 , lowercase_ : Dict=37 , lowercase_ : List[str]="gelu" , lowercase_ : int=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : int=50 , lowercase_ : Any=0.02 , lowercase_ : str=True , lowercase_ : int=None , ): snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = initializer_range snake_case_ = use_labels snake_case_ = scope def A_ ( self : Dict ): snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = self.get_config() return config, input_ids, input_mask, token_labels def A_ ( self : Optional[int] ): return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=snake_case_ , initializer_range=self.initializer_range , ) def A_ ( self : Dict ): ( snake_case_ ) = self.prepare_config_and_inputs() snake_case_ = True snake_case_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def A_ ( self : Optional[int] , lowercase_ : Tuple , lowercase_ : str , lowercase_ : str , lowercase_ : Optional[int] , **lowercase_ : int , ): snake_case_ = BertGenerationEncoder(config=snake_case_ ) model.to(snake_case_ ) model.eval() snake_case_ = model(snake_case_ , attention_mask=snake_case_ ) snake_case_ = model(snake_case_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self : Dict , lowercase_ : str , lowercase_ : str , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Dict , **lowercase_ : Optional[Any] , ): snake_case_ = True snake_case_ = BertGenerationEncoder(config=snake_case_ ) model.to(snake_case_ ) model.eval() snake_case_ = model( snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , ) snake_case_ = model( snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self : str , lowercase_ : str , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Any , **lowercase_ : Optional[int] , ): snake_case_ = True snake_case_ = True snake_case_ = BertGenerationDecoder(config=snake_case_ ).to(snake_case_ ).eval() # first forward pass snake_case_ = model( snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , use_cache=snake_case_ , ) snake_case_ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) snake_case_ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case_ = torch.cat([input_mask, next_mask] , dim=-1 ) snake_case_ = model( snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , output_hidden_states=snake_case_ , )["""hidden_states"""][0] snake_case_ = model( snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , past_key_values=snake_case_ , output_hidden_states=snake_case_ , )["""hidden_states"""][0] # select random slice snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case_ = output_from_no_past[:, -3:, random_slice_idx].detach() snake_case_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1e-3 ) ) def A_ ( self : List[str] , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Any , *lowercase_ : Union[str, Any] , ): snake_case_ = BertGenerationDecoder(snake_case_ ) model.to(snake_case_ ) model.eval() snake_case_ = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A_ ( self : List[str] ): snake_case_ = self.prepare_config_and_inputs() snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( _a , _a , _a , unittest.TestCase ): snake_case_ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () snake_case_ = (BertGenerationDecoder,) if is_torch_available() else () snake_case_ = ( {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder} if is_torch_available() else {} ) def A_ ( self : Tuple ): snake_case_ = BertGenerationEncoderTester(self ) snake_case_ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 ) def A_ ( self : List[Any] ): self.config_tester.run_common_tests() def A_ ( self : List[str] ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case_ ) def A_ ( self : Dict ): snake_case_ = self.model_tester.prepare_config_and_inputs() snake_case_ = """bert""" self.model_tester.create_and_check_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) def A_ ( self : Union[str, Any] ): snake_case_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*snake_case_ ) def A_ ( self : Any ): snake_case_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*snake_case_ ) def A_ ( self : Optional[int] ): # This regression test was failing with PyTorch < 1.3 ( snake_case_ ) = self.model_tester.prepare_config_and_inputs_for_decoder() snake_case_ = None self.model_tester.create_and_check_model_as_decoder( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) def A_ ( self : List[Any] ): snake_case_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*snake_case_ ) @slow def A_ ( self : int ): snake_case_ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) self.assertIsNotNone(snake_case_ ) @require_torch class a ( unittest.TestCase ): @slow def A_ ( self : Any ): snake_case_ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) snake_case_ = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): snake_case_ = model(snake_case_ )[0] snake_case_ = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape , snake_case_ ) snake_case_ = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case_ , atol=1e-4 ) ) @require_torch class a ( unittest.TestCase ): @slow def A_ ( self : Any ): snake_case_ = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) snake_case_ = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): snake_case_ = model(snake_case_ )[0] snake_case_ = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape , snake_case_ ) snake_case_ = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case_ , atol=1e-4 ) )
371
'''simple docstring''' import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class a : def __init__( self : str , lowercase_ : Optional[Any] , lowercase_ : Optional[Any]=13 , lowercase_ : int=64 , lowercase_ : Tuple=2 , lowercase_ : List[str]=3 , lowercase_ : str=True , lowercase_ : Dict=True , lowercase_ : int=32 , lowercase_ : int=5 , lowercase_ : Optional[Any]=4 , lowercase_ : Optional[Any]=37 , lowercase_ : List[Any]="gelu" , lowercase_ : Tuple=0.1 , lowercase_ : str=0.1 , lowercase_ : Any=10 , lowercase_ : List[str]=0.02 , lowercase_ : Tuple=[1, 16, 4, 4] , lowercase_ : Tuple=None , ): snake_case_ = parent snake_case_ = batch_size snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = is_training snake_case_ = use_labels snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = scope snake_case_ = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size snake_case_ = (self.image_size // 32) ** 2 snake_case_ = num_patches + 1 def A_ ( self : List[Any] ): snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ = self.get_config() return config, pixel_values, labels def A_ ( self : Any ): snake_case_ = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, '''hidden_sizes''': [4, 8, 16, 32], '''num_groups''': 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=lowercase_ , ) def A_ ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : int ): snake_case_ = ViTHybridModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self : List[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Optional[int] ): snake_case_ = self.type_sequence_label_size snake_case_ = ViTHybridForImageClassification(lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ = model(lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def A_ ( self : List[Any] ): snake_case_ = self.prepare_config_and_inputs() snake_case_ ,snake_case_ ,snake_case_ = config_and_inputs snake_case_ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class a ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): snake_case_ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () snake_case_ = ( {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification} if is_torch_available() else {} ) snake_case_ = False snake_case_ = False snake_case_ = False def A_ ( self : Optional[Any] ): snake_case_ = ViTHybridModelTester(self ) snake_case_ = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 ) def A_ ( self : Optional[int] ): self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def A_ ( self : Any ): pass def A_ ( self : Dict ): snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(lowercase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) ) def A_ ( self : Dict ): snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(lowercase_ ) snake_case_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ = [*signature.parameters.keys()] snake_case_ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowercase_ ) def A_ ( self : Tuple ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def A_ ( self : List[Any] ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_ ) def A_ ( self : Optional[Any] ): snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = _config_zero_init(lowercase_ ) for model_class in self.all_model_classes: snake_case_ = model_class(config=lowercase_ ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": snake_case_ = [F"{name}.{key}" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , ) @slow def A_ ( self : Tuple ): for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = ViTHybridModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def __magic_name__ ( ) -> List[Any]: '''simple docstring''' snake_case_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class a ( unittest.TestCase ): @cached_property def A_ ( self : Any ): return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def A_ ( self : List[str] ): snake_case_ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( lowercase_ ) snake_case_ = self.default_image_processor snake_case_ = prepare_img() snake_case_ = image_processor(images=lowercase_ , return_tensors='''pt''' ).to(lowercase_ ) # forward pass with torch.no_grad(): snake_case_ = model(**lowercase_ ) # verify the logits snake_case_ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowercase_ ) snake_case_ = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4 ) ) @slow @require_accelerate def A_ ( self : Dict ): snake_case_ = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' ) snake_case_ = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' ) snake_case_ = prepare_img() snake_case_ = image_processor(images=lowercase_ , return_tensors='''pt''' ) snake_case_ = model(**lowercase_ ) snake_case_ = outputs.logits # model predicts one of the 1000 ImageNet classes snake_case_ = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx] , '''tabby, tabby cat''' )
72
0
import requests from bsa import BeautifulSoup def A ( _lowerCamelCase = "AAPL" ): '''simple docstring''' _lowerCAmelCase : str = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}" _lowerCAmelCase : Optional[int] = BeautifulSoup(requests.get(_lowerCamelCase ).text , "html.parser" ) _lowerCAmelCase : List[Any] = "My(6px) Pos(r) smartphone_Mt(6px)" return soup.find("div" , class_=class_ ).find("span" ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
36
from __future__ import annotations import typing from collections import Counter def __lowerCAmelCase ( a__ ) -> typing.Counter[int]: __a = Counter() for base in range(1 , max_perimeter + 1 ): for perpendicular in range(a__ , max_perimeter + 1 ): __a = (base * base + perpendicular * perpendicular) ** 0.5 if hypotenuse == int(a__ ): __a = int(base + perpendicular + hypotenuse ) if perimeter > max_perimeter: continue triplets[perimeter] += 1 return triplets def __lowerCAmelCase ( a__ = 1000 ) -> int: __a = pythagorean_triple(a__ ) return triplets.most_common(1 )[0][0] if __name__ == "__main__": print(F"Perimeter {solution()} has maximum solutions")
6
0
"""simple docstring""" import argparse import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase_ : Union[str, Any] = 1_6 lowerCAmelCase_ : Optional[Any] = 3_2 def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase = 16 ) -> Any: '''simple docstring''' UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" ) UpperCAmelCase = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(lowerCAmelCase ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase , max_length=lowerCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCAmelCase = datasets.map( lowerCAmelCase , batched=lowerCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowerCAmelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": UpperCAmelCase = 8 else: UpperCAmelCase = None return tokenizer.pad( lowerCAmelCase , padding="""longest""" , max_length=lowerCAmelCase , pad_to_multiple_of=lowerCAmelCase , return_tensors="""pt""" , ) # Instantiate dataloaders. UpperCAmelCase = DataLoader( tokenized_datasets["""train"""] , shuffle=lowerCAmelCase , collate_fn=lowerCAmelCase , batch_size=lowerCAmelCase , drop_last=lowerCAmelCase ) UpperCAmelCase = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowerCAmelCase , collate_fn=lowerCAmelCase , batch_size=lowerCAmelCase , drop_last=(accelerator.mixed_precision == """fp8""") , ) return train_dataloader, eval_dataloader def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ) -> List[str]: '''simple docstring''' UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase = config["""lr"""] UpperCAmelCase = int(config["""num_epochs"""] ) UpperCAmelCase = int(config["""seed"""] ) UpperCAmelCase = int(config["""batch_size"""] ) UpperCAmelCase = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation UpperCAmelCase = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE UpperCAmelCase = MAX_GPU_BATCH_SIZE set_seed(lowerCAmelCase ) UpperCAmelCase , UpperCAmelCase = get_dataloaders(lowerCAmelCase , lowerCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer UpperCAmelCase = AdamW(params=model.parameters() , lr=lowerCAmelCase ) # Instantiate scheduler UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # Now we train the model for epoch in range(lowerCAmelCase ): model.train() for step, batch in enumerate(lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) UpperCAmelCase = model(**lowerCAmelCase ) UpperCAmelCase = outputs.loss UpperCAmelCase = loss / gradient_accumulation_steps accelerator.backward(lowerCAmelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase = model(**lowerCAmelCase ) UpperCAmelCase = outputs.logits.argmax(dim=-1 ) UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowerCAmelCase , references=lowerCAmelCase , ) UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , lowerCAmelCase ) def _lowerCAmelCase ( ) -> List[Any]: '''simple docstring''' UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=lowerCAmelCase , default=lowerCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) UpperCAmelCase = parser.parse_args() UpperCAmelCase = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowerCAmelCase , lowerCAmelCase ) if __name__ == "__main__": main()
359
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Image from .base import TaskTemplate @dataclass(frozen=a_ ) class UpperCamelCase_ ( a_ ): _A : str = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} ) _A : ClassVar[Features] = Features({'image': Image()} ) _A : ClassVar[Features] = Features({'labels': ClassLabel} ) _A : str = "image" _A : str = "labels" def UpperCamelCase_ ( self , snake_case__ ) -> List[str]: """simple docstring""" if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , snake_case__ ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) UpperCAmelCase = copy.deepcopy(self ) UpperCAmelCase = self.label_schema.copy() UpperCAmelCase = features[self.label_column] UpperCAmelCase = label_schema return task_template @property def UpperCamelCase_ ( self ) -> Dict[str, str]: """simple docstring""" return { self.image_column: "image", self.label_column: "labels", }
248
0
import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class a__ ( snake_case ): """simple docstring""" def __init__( self , lowercase , lowercase , lowercase=1024 , lowercase=1024 , lowercase=3.6 ) -> Tuple: '''simple docstring''' A__ = tokenizer A__ = tokenizer.bos_token_id A__ = dataset A__ = seq_length A__ = seq_length * chars_per_token * num_of_sequences def __iter__( self ) -> Tuple: '''simple docstring''' A__ = iter(self.dataset ) A__ = True while more_examples: A__ , A__ = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(lowercase )["content"] ) buffer_len += len(buffer[-1] ) except StopIteration: A__ = False break A__ = tokenizer(lowercase , truncation=lowercase )["input_ids"] A__ = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(lowercase ) , self.seq_length ): A__ = all_token_ids[i : i + self.seq_length] if len(lowercase ) == self.seq_length: yield torch.tensor(lowercase ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> List[str]: '''simple docstring''' A__ = {"streaming": True} A__ = load_dataset(args.dataset_name , split="train" , **SCREAMING_SNAKE_CASE_ ) A__ = ConstantLengthDataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , seq_length=args.seq_length ) A__ = DataLoader(SCREAMING_SNAKE_CASE_ , batch_size=args.batch_size ) return eval_dataloader def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] ) -> int: '''simple docstring''' model.eval() A__ = [] for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ): with torch.no_grad(): A__ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) A__ = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(SCREAMING_SNAKE_CASE_ ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break A__ = torch.mean(torch.cat(SCREAMING_SNAKE_CASE_ ) ) try: A__ = torch.exp(SCREAMING_SNAKE_CASE_ ) except OverflowError: A__ = float("inf" ) return loss.item(), perplexity.item() # Setup Accelerator lowerCAmelCase__ = Accelerator() # Parse configuration lowerCAmelCase__ = HfArgumentParser(EvaluationArguments) lowerCAmelCase__ = parser.parse_args() set_seed(args.seed) # Logging lowerCAmelCase__ = logging.getLogger(__name__) logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) # Load model and tokenizer lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt) lowerCAmelCase__ = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader lowerCAmelCase__ = create_dataloader(args) # Prepare everything with our `accelerator`. lowerCAmelCase__ , lowerCAmelCase__ = accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info("""Evaluating and saving model after training""") lowerCAmelCase__ , lowerCAmelCase__ = evaluate(args) logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
68
from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
68
1
def _a ( SCREAMING_SNAKE_CASE_ : list[list[int]] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : set ): __lowerCAmelCase , __lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ ), len(grid[0] ) if ( min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) __lowerCAmelCase = 0 count += depth_first_search(SCREAMING_SNAKE_CASE_ , row + 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) count += depth_first_search(SCREAMING_SNAKE_CASE_ , row - 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) count += depth_first_search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , col + 1 , SCREAMING_SNAKE_CASE_ ) count += depth_first_search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , col - 1 , SCREAMING_SNAKE_CASE_ ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
356
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCamelCase__ = 16 UpperCamelCase__ = 32 def _a ( SCREAMING_SNAKE_CASE_ : Accelerator , SCREAMING_SNAKE_CASE_ : int = 16 ): __lowerCAmelCase = AutoTokenizer.from_pretrained("bert-base-cased" ) __lowerCAmelCase = load_dataset("glue" , "mrpc" ) def tokenize_function(SCREAMING_SNAKE_CASE_ : str ): # max_length=None => use the model max length (it's actually the default) __lowerCAmelCase = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowerCAmelCase = datasets.map( SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCAmelCase = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(SCREAMING_SNAKE_CASE_ : int ): # On TPU it's best to pad everything to the same length or training will be very slow. __lowerCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowerCAmelCase = 16 elif accelerator.mixed_precision != "no": __lowerCAmelCase = 8 else: __lowerCAmelCase = None return tokenizer.pad( SCREAMING_SNAKE_CASE_ , padding="longest" , max_length=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_tensors="pt" , ) # Instantiate dataloaders. __lowerCAmelCase = DataLoader( tokenized_datasets["train"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = DataLoader( tokenized_datasets["validation"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCamelCase__ = mocked_dataloaders # noqa: F811 def _a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS" , SCREAMING_SNAKE_CASE_ ) == "1": __lowerCAmelCase = 2 # New Code # __lowerCAmelCase = int(args.gradient_accumulation_steps ) # Initialize accelerator __lowerCAmelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=SCREAMING_SNAKE_CASE_ ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCAmelCase = config["lr"] __lowerCAmelCase = int(config["num_epochs"] ) __lowerCAmelCase = int(config["seed"] ) __lowerCAmelCase = int(config["batch_size"] ) __lowerCAmelCase = evaluate.load("glue" , "mrpc" ) set_seed(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase , __lowerCAmelCase = get_dataloaders(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=SCREAMING_SNAKE_CASE_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCAmelCase = model.to(accelerator.device ) # Instantiate optimizer __lowerCAmelCase = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ ) # Instantiate scheduler __lowerCAmelCase = get_linear_schedule_with_warmup( optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=1_00 , num_training_steps=(len(SCREAMING_SNAKE_CASE_ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Now we train the model for epoch in range(SCREAMING_SNAKE_CASE_ ): model.train() for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(SCREAMING_SNAKE_CASE_ ): __lowerCAmelCase = model(**SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = output.loss accelerator.backward(SCREAMING_SNAKE_CASE_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __lowerCAmelCase = model(**SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = outputs.logits.argmax(dim=-1 ) __lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , ) __lowerCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , SCREAMING_SNAKE_CASE_ ) def _a ( ): __lowerCAmelCase = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) # New Code # parser.add_argument( "--gradient_accumulation_steps" , type=SCREAMING_SNAKE_CASE_ , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) __lowerCAmelCase = parser.parse_args() __lowerCAmelCase = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": main()
102
0
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: int ) -> Optional[int]: snake_case_ :str = inspect.getfile(accelerate.test_utils ) snake_case_ :List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] ) snake_case_ :Optional[Any] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_distributed_data_loop.py"""] ) snake_case_ :str = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_ops.py"""] ) @require_multi_gpu def lowerCAmelCase_ ( self: Optional[Any] ) -> List[str]: print(f"""Found {torch.cuda.device_count()} devices.""" ) snake_case_ :Any = ["""torchrun""", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def lowerCAmelCase_ ( self: Optional[Any] ) -> str: print(f"""Found {torch.cuda.device_count()} devices.""" ) snake_case_ :str = ["""torchrun""", f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path] print(f"""Command: {cmd}""" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[str]: snake_case_ :List[Any] = ["""torchrun""", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def lowerCAmelCase_ ( self: str ) -> List[str]: print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""" ) snake_case_ :Optional[int] = ["""torchrun""", f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="""0,1""" ): execute_subprocess_async(snake_case , env=os.environ.copy() ) if __name__ == "__main__": __a = Accelerator() __a = (accelerator.state.process_index + 2, 10) __a = torch.randint(0, 10, shape).to(accelerator.device) __a = "" __a = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." __a = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." __a = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
66
"""simple docstring""" import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class lowerCamelCase : '''simple docstring''' def __init__( self: Dict , snake_case: Optional[Any] , snake_case: Tuple=13 , snake_case: Any=32 , snake_case: Union[str, Any]=2 , snake_case: Tuple=3 , snake_case: Union[str, Any]=16 , snake_case: Union[str, Any]=[1, 2, 1] , snake_case: Optional[Any]=[2, 2, 4] , snake_case: str=2 , snake_case: List[str]=2.0 , snake_case: Optional[int]=True , snake_case: Union[str, Any]=0.0 , snake_case: Optional[int]=0.0 , snake_case: Optional[Any]=0.1 , snake_case: List[str]="gelu" , snake_case: Any=False , snake_case: Optional[Any]=True , snake_case: Optional[int]=0.0_2 , snake_case: Any=1E-5 , snake_case: Optional[int]=True , snake_case: int=None , snake_case: Any=True , snake_case: str=10 , snake_case: Optional[Any]=8 , snake_case: Union[str, Any]=["stage1", "stage2", "stage3"] , snake_case: Tuple=[1, 2, 3] , ) -> Dict: snake_case_ :Dict = parent snake_case_ :List[Any] = batch_size snake_case_ :Dict = image_size snake_case_ :Dict = patch_size snake_case_ :Tuple = num_channels snake_case_ :List[Any] = embed_dim snake_case_ :List[str] = depths snake_case_ :str = num_heads snake_case_ :Tuple = window_size snake_case_ :Tuple = mlp_ratio snake_case_ :int = qkv_bias snake_case_ :Tuple = hidden_dropout_prob snake_case_ :Optional[Any] = attention_probs_dropout_prob snake_case_ :Dict = drop_path_rate snake_case_ :Any = hidden_act snake_case_ :Any = use_absolute_embeddings snake_case_ :int = patch_norm snake_case_ :List[Any] = layer_norm_eps snake_case_ :Tuple = initializer_range snake_case_ :str = is_training snake_case_ :int = scope snake_case_ :Tuple = use_labels snake_case_ :Tuple = type_sequence_label_size snake_case_ :str = encoder_stride snake_case_ :List[Any] = out_features snake_case_ :str = out_indices def lowerCAmelCase_ ( self: Tuple ) -> Dict: snake_case_ :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ :str = None if self.use_labels: snake_case_ :Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ :Union[str, Any] = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self: int ) -> Optional[Any]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def lowerCAmelCase_ ( self: List[Any] , snake_case: str , snake_case: int , snake_case: List[str] ) -> Any: snake_case_ :Dict = MaskFormerSwinModel(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Tuple = model(snake_case ) snake_case_ :Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case_ :Any = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowerCAmelCase_ ( self: Optional[Any] , snake_case: int , snake_case: List[str] , snake_case: Tuple ) -> Union[str, Any]: snake_case_ :Any = MaskFormerSwinBackbone(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Optional[Any] = model(snake_case ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(snake_case ): snake_case_ :Optional[Any] = ["""stem"""] snake_case_ :str = MaskFormerSwinBackbone(config=snake_case ) def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]: snake_case_ :Optional[int] = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_ :str = config_and_inputs snake_case_ :Tuple = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : Union[str, Any] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) _A : str = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {} _A : List[str] = False _A : Any = False _A : Dict = False _A : List[Any] = False _A : Optional[int] = False def lowerCAmelCase_ ( self: Dict ) -> Any: snake_case_ :str = MaskFormerSwinModelTester(self ) snake_case_ :Optional[Any] = ConfigTester(self , config_class=snake_case , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with""" """ `nn.DataParallel`""" ) ) def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]: pass def lowerCAmelCase_ ( self: Union[str, Any] ) -> Dict: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase_ ( self: Any ) -> Tuple: return def lowerCAmelCase_ ( self: Any ) -> Any: snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> int: snake_case_ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*snake_case ) @unittest.skip("""Swin does not use inputs_embeds""" ) def lowerCAmelCase_ ( self: str ) -> List[str]: pass @unittest.skip("""Swin does not support feedforward chunking""" ) def lowerCAmelCase_ ( self: int ) -> Optional[int]: pass def lowerCAmelCase_ ( self: List[str] ) -> List[Any]: snake_case_, snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :str = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case_ :Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) ) def lowerCAmelCase_ ( self: Tuple ) -> Dict: snake_case_, snake_case_ :int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :Optional[int] = model_class(snake_case ) snake_case_ :str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ :str = [*signature.parameters.keys()] snake_case_ :str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case ) @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" ) def lowerCAmelCase_ ( self: List[Any] ) -> List[Any]: pass @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" ) def lowerCAmelCase_ ( self: Dict ) -> List[Any]: pass def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: Union[str, Any] , snake_case: int , snake_case: Any , snake_case: List[str] ) -> str: snake_case_ :List[str] = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :List[Any] = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :Any = outputs.hidden_states snake_case_ :Optional[int] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(snake_case ) , snake_case ) # Swin has a different seq_length snake_case_ :str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowerCAmelCase_ ( self: List[Any] ) -> Optional[int]: snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: snake_case_ :Tuple = True self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :List[Any] = True self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Tuple: snake_case_, snake_case_ :int = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :List[Any] = 3 snake_case_ :List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case_ :Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case_ :List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: snake_case_ :str = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :Any = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[str]: pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def lowerCAmelCase_ ( self: List[str] ) -> str: pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def lowerCAmelCase_ ( self: str ) -> List[Any]: pass def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]: snake_case_, snake_case_ :Dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(snake_case: str ): snake_case_ :Optional[int] = 0 return t def check_equivalence(snake_case: List[Any] , snake_case: Union[str, Any] , snake_case: int , snake_case: Tuple={} ): with torch.no_grad(): snake_case_ :List[Any] = model(**snake_case , return_dict=snake_case , **snake_case ) snake_case_ :Any = model(**snake_case , return_dict=snake_case , **snake_case ).to_tuple() def recursive_check(snake_case: List[Any] , snake_case: int ): if isinstance(snake_case , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(snake_case , snake_case ): recursive_check(snake_case , snake_case ) elif isinstance(snake_case , snake_case ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(snake_case , snake_case ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(snake_case ) , set_nan_tensor_to_zero(snake_case ) , atol=1E-5 ) , msg=( """Tuple and dict output are not equal. Difference:""" f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:""" f""" {torch.isnan(snake_case ).any()} and `inf`: {torch.isinf(snake_case )}. Dict has""" f""" `nan`: {torch.isnan(snake_case ).any()} and `inf`: {torch.isinf(snake_case )}.""" ) , ) recursive_check(snake_case , snake_case ) for model_class in self.all_model_classes: snake_case_ :int = model_class(snake_case ) model.to(snake_case ) model.eval() snake_case_ :Any = self._prepare_for_class(snake_case , snake_case ) snake_case_ :List[Any] = self._prepare_for_class(snake_case , snake_case ) check_equivalence(snake_case , snake_case , snake_case ) snake_case_ :Tuple = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) snake_case_ :Dict = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) check_equivalence(snake_case , snake_case , snake_case ) snake_case_ :Tuple = self._prepare_for_class(snake_case , snake_case ) snake_case_ :Any = self._prepare_for_class(snake_case , snake_case ) check_equivalence(snake_case , snake_case , snake_case , {"""output_hidden_states""": True} ) snake_case_ :Dict = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) snake_case_ :List[str] = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) check_equivalence(snake_case , snake_case , snake_case , {"""output_hidden_states""": True} ) @require_torch class lowerCamelCase ( unittest.TestCase , _lowerCAmelCase ): '''simple docstring''' _A : int = (MaskFormerSwinBackbone,) if is_torch_available() else () _A : Tuple = MaskFormerSwinConfig def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]: snake_case_ :Optional[Any] = MaskFormerSwinModelTester(self ) def lowerCAmelCase_ ( self: int ) -> Optional[int]: snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Tuple = inputs_dict["""pixel_values"""].shape[0] for backbone_class in self.all_model_classes: snake_case_ :List[str] = backbone_class(snake_case ) backbone.to(snake_case ) backbone.eval() snake_case_ :List[Any] = backbone(**snake_case ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , snake_case ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True snake_case_ :Union[str, Any] = backbone(**snake_case , output_hidden_states=snake_case ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) snake_case_, snake_case_, snake_case_ :List[Any] = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: snake_case_ :List[Any] = backbone(**snake_case , output_attentions=snake_case ) self.assertIsNotNone(outputs.attentions )
66
1
from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def __UpperCAmelCase ( __a : int ) -> bool: """simple docstring""" _a : int = int(number**0.5 ) return number == sq * sq def __UpperCAmelCase ( __a : int ,__a : int ,__a : int ,__a : int ,__a : int ,__a : int ) -> tuple[int, int]: """simple docstring""" _a : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den _a : int = x_den * y_den * z_den _a : int = gcd(__a ,__a ) top //= hcf bottom //= hcf return top, bottom def __UpperCAmelCase ( __a : int = 35 ) -> int: """simple docstring""" _a : set = set() _a : int _a : Fraction = Fraction(0 ) _a : tuple[int, int] for x_num in range(1 ,order + 1 ): for x_den in range(x_num + 1 ,order + 1 ): for y_num in range(1 ,order + 1 ): for y_den in range(y_num + 1 ,order + 1 ): # n=1 _a : Optional[Any] = x_num * y_den + x_den * y_num _a : Any = x_den * y_den _a : List[str] = gcd(__a ,__a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _a : List[Any] = add_three( __a ,__a ,__a ,__a ,__a ,__a ) unique_s.add(__a ) # n=2 _a : Dict = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) _a : Tuple = x_den * x_den * y_den * y_den if is_sq(__a ) and is_sq(__a ): _a : List[str] = int(sqrt(__a ) ) _a : List[str] = int(sqrt(__a ) ) _a : List[Any] = gcd(__a ,__a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _a : List[Any] = add_three( __a ,__a ,__a ,__a ,__a ,__a ) unique_s.add(__a ) # n=-1 _a : Optional[Any] = x_num * y_num _a : List[Any] = x_den * y_num + x_num * y_den _a : List[Any] = gcd(__a ,__a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _a : Any = add_three( __a ,__a ,__a ,__a ,__a ,__a ) unique_s.add(__a ) # n=2 _a : int = x_num * x_num * y_num * y_num _a : Tuple = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(__a ) and is_sq(__a ): _a : str = int(sqrt(__a ) ) _a : Optional[int] = int(sqrt(__a ) ) _a : Optional[int] = gcd(__a ,__a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _a : Optional[int] = add_three( __a ,__a ,__a ,__a ,__a ,__a ) unique_s.add(__a ) for num, den in unique_s: total += Fraction(__a ,__a ) return total.denominator + total.numerator if __name__ == "__main__": print(f'''{solution() = }''')
366
from __future__ import annotations def __UpperCAmelCase ( __a : list ) -> float: """simple docstring""" if not nums: raise ValueError('''List is empty''' ) return sum(__a ) / len(__a ) if __name__ == "__main__": import doctest doctest.testmod()
15
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _A : Tuple =logging.get_logger(__name__) _A : str ={ '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''', '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''', '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''', '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class _lowercase ( _lowercase ): a = """mobilenet_v2""" def __init__( self: List[Any] , UpperCamelCase__: List[Any]=3 , UpperCamelCase__: Dict=224 , UpperCamelCase__: Optional[Any]=1.0 , UpperCamelCase__: Tuple=8 , UpperCamelCase__: Dict=8 , UpperCamelCase__: List[str]=6 , UpperCamelCase__: Tuple=32 , UpperCamelCase__: List[Any]=True , UpperCamelCase__: str=True , UpperCamelCase__: Dict="relu6" , UpperCamelCase__: Dict=True , UpperCamelCase__: Union[str, Any]=0.8 , UpperCamelCase__: List[Any]=0.02 , UpperCamelCase__: str=0.001 , UpperCamelCase__: Union[str, Any]=255 , **UpperCamelCase__: Dict , ): super().__init__(**UpperCamelCase__ ) if depth_multiplier <= 0: raise ValueError("""depth_multiplier must be greater than zero.""" ) lowerCamelCase__ : str = num_channels lowerCamelCase__ : Tuple = image_size lowerCamelCase__ : str = depth_multiplier lowerCamelCase__ : Optional[Any] = depth_divisible_by lowerCamelCase__ : List[str] = min_depth lowerCamelCase__ : Tuple = expand_ratio lowerCamelCase__ : Union[str, Any] = output_stride lowerCamelCase__ : str = first_layer_is_expansion lowerCamelCase__ : List[Any] = finegrained_output lowerCamelCase__ : Tuple = hidden_act lowerCamelCase__ : int = tf_padding lowerCamelCase__ : List[str] = classifier_dropout_prob lowerCamelCase__ : Optional[Any] = initializer_range lowerCamelCase__ : Union[str, Any] = layer_norm_eps lowerCamelCase__ : List[str] = semantic_loss_ignore_index class _lowercase ( _lowercase ): a = version.parse("""1.11""" ) @property def lowerCamelCase_ ( self: Tuple ): return OrderedDict([("""pixel_values""", {0: """batch"""})] ) @property def lowerCamelCase_ ( self: Optional[Any] ): if self.task == "image-classification": return OrderedDict([("""logits""", {0: """batch"""})] ) else: return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] ) @property def lowerCamelCase_ ( self: str ): return 1e-4
41
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _lowerCamelCase : Union[str, Any] = TextStreamer(__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCamelCase : int = cs.out[:-1] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : List[str] = tokenizer.decode(greedy_ids[0] ) _lowerCamelCase : Tuple = TextIteratorStreamer(__lowerCAmelCase ) _lowerCamelCase : Tuple = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase ) thread.start() _lowerCamelCase : int = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : int = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = greedy_ids[:, input_ids.shape[1] :] _lowerCamelCase : int = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _lowerCamelCase : Any = TextStreamer(__lowerCAmelCase , skip_prompt=__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCamelCase : Union[str, Any] = cs.out[:-1] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' ) _lowerCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : str = -1 _lowerCamelCase : Any = torch.ones((1, 5) , device=__lowerCAmelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: _lowerCamelCase : List[Any] = TextStreamer(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _lowerCamelCase : Any = cs.out[:-1] # Remove the final "\n" _lowerCamelCase : int = tokenizer(__lowerCAmelCase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = -1 _lowerCamelCase : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : List[str] = TextIteratorStreamer(__lowerCAmelCase , timeout=0.0_01 ) _lowerCamelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__lowerCAmelCase ): _lowerCamelCase : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text
72
0
"""simple docstring""" import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def _snake_case ( ) -> Dict: lowerCamelCase_ : int =argparse.ArgumentParser() parser.add_argument("--model_ckpt" , type=lowerCamelCase__ , default="microsoft/unixcoder-base-nine" ) parser.add_argument("--num_epochs" , type=lowerCamelCase__ , default=5 ) parser.add_argument("--batch_size" , type=lowerCamelCase__ , default=6 ) parser.add_argument("--gradient_accumulation_steps" , type=lowerCamelCase__ , default=1 ) parser.add_argument("--freeze" , type=lowerCamelCase__ , default=lowerCamelCase__ ) parser.add_argument("--learning_rate" , type=lowerCamelCase__ , default=5e-4 ) parser.add_argument("--seed" , type=lowerCamelCase__ , default=0 ) parser.add_argument("--lr_scheduler_type" , type=lowerCamelCase__ , default="cosine" ) parser.add_argument("--num_warmup_steps" , type=lowerCamelCase__ , default=10 ) parser.add_argument("--weight_decay" , type=lowerCamelCase__ , default=0.01 ) parser.add_argument("--output_dir" , type=lowerCamelCase__ , default="./results" ) return parser.parse_args() A__ : List[Any] = load('accuracy') def _snake_case ( lowerCamelCase__ : Tuple ) -> Optional[Any]: lowerCamelCase_ : Dict =eval_pred lowerCamelCase_ : Tuple =np.argmax(lowerCamelCase__ , axis=1 ) return metric.compute(predictions=lowerCamelCase__ , references=lowerCamelCase__ ) class lowercase__ ( snake_case__ ): def __init__( self : int , snake_case__ : int ): super().__init__() lowerCamelCase_ : Optional[int] =trainer def UpperCAmelCase__ ( self : str , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : List[str] , **snake_case__ : List[str] ): if control.should_evaluate: lowerCamelCase_ : List[str] =deepcopy(snake_case__ ) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" ) return control_copy def _snake_case ( ) -> Any: lowerCamelCase_ : int =get_args() set_seed(args.seed ) lowerCamelCase_ : List[Any] =load_dataset("codeparrot/codecomplex" , split="train" ) lowerCamelCase_ : List[Any] =dataset.train_test_split(test_size=0.2 ) lowerCamelCase_ : Union[str, Any] =train_test["test"].train_test_split(test_size=0.5 ) lowerCamelCase_ : Dict =DatasetDict( { "train": train_test["train"], "test": test_validation["train"], "valid": test_validation["test"], } ) print("Loading tokenizer and model" ) lowerCamelCase_ : Optional[Any] =AutoTokenizer.from_pretrained(args.model_ckpt ) lowerCamelCase_ : List[Any] =tokenizer.eos_token lowerCamelCase_ : Union[str, Any] =AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 ) lowerCamelCase_ : Optional[Any] =model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): lowerCamelCase_ : Dict =False lowerCamelCase_ : Dict =ClassLabel(num_classes=7 , names=list(set(train_test_validation["train"]["complexity"] ) ) ) def tokenize(lowerCamelCase__ : List[Any] ): lowerCamelCase_ : Any =tokenizer(example["src"] , truncation=lowerCamelCase__ , max_length=1_024 ) lowerCamelCase_ : Dict =labels.straint(example["complexity"] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], "label": label, } lowerCamelCase_ : Union[str, Any] =train_test_validation.map( lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=train_test_validation["train"].column_names , ) lowerCamelCase_ : Optional[Any] =DataCollatorWithPadding(tokenizer=lowerCamelCase__ ) lowerCamelCase_ : int =TrainingArguments( output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="epoch" , save_strategy="epoch" , logging_strategy="epoch" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="accuracy" , run_name="complexity-java" , report_to="wandb" , ) lowerCamelCase_ : Any =Trainer( model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=tokenized_datasets["train"] , eval_dataset=tokenized_datasets["valid"] , tokenizer=lowerCamelCase__ , data_collator=lowerCamelCase__ , compute_metrics=lowerCamelCase__ , ) print("Training..." ) trainer.add_callback(CustomCallback(lowerCamelCase__ ) ) trainer.train() if __name__ == "__main__": main()
361
"""simple docstring""" def _snake_case ( lowerCamelCase__ : Optional[Any] ) -> Optional[int]: if not head: return True # split the list to two parts lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] =head.next, head while fast and fast.next: lowerCamelCase_ : Optional[Any] =fast.next.next lowerCamelCase_ : str =slow.next lowerCamelCase_ : Tuple =slow.next lowerCamelCase_ : Any =None # Don't forget here! But forget still works! # reverse the second part lowerCamelCase_ : List[str] =None while second: lowerCamelCase_ : Any =second.next lowerCamelCase_ : Union[str, Any] =node lowerCamelCase_ : Union[str, Any] =second lowerCamelCase_ : Optional[Any] =nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False lowerCamelCase_ : List[str] =node.next lowerCamelCase_ : Optional[Any] =head.next return True def _snake_case ( lowerCamelCase__ : str ) -> Optional[int]: if not head or not head.next: return True # 1. Get the midpoint (slow) lowerCamelCase_ : List[str] =head while fast and fast.next: lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] =fast.next.next, slow.next # 2. Push the second half into the stack lowerCamelCase_ : List[Any] =[slow.val] while slow.next: lowerCamelCase_ : List[Any] =slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False lowerCamelCase_ : Union[str, Any] =cur.next return True def _snake_case ( lowerCamelCase__ : Dict ) -> Optional[Any]: if not head or not head.next: return True lowerCamelCase_ : Union[str, Any] ={} lowerCamelCase_ : List[Any] =0 while head: if head.val in d: d[head.val].append(lowerCamelCase__ ) else: lowerCamelCase_ : List[str] =[pos] lowerCamelCase_ : Optional[int] =head.next pos += 1 lowerCamelCase_ : Union[str, Any] =pos - 1 lowerCamelCase_ : Optional[int] =0 for v in d.values(): if len(lowerCamelCase__ ) % 2 != 0: middle += 1 else: lowerCamelCase_ : Optional[Any] =0 for i in range(0 , len(lowerCamelCase__ ) ): if v[i] + v[len(lowerCamelCase__ ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
209
0
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class __lowercase ( unittest.TestCase ): '''simple docstring''' __lowerCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __lowerCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : List[Any] = TextaTextGenerationPipeline(model=_lowercase , tokenizer=_lowercase ) return generator, ["Something to write", "Something else"] def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ): __a : Any = generator('''Something there''' ) self.assertEqual(_lowercase , [{'''generated_text''': ANY(_lowercase )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) ) __a : Any = generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_lowercase ) self.assertEqual( _lowercase , [ [{'''generated_text''': ANY(_lowercase )}, {'''generated_text''': ANY(_lowercase )}], [{'''generated_text''': ANY(_lowercase )}, {'''generated_text''': ANY(_lowercase )}], ] , ) __a : str = generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_lowercase ) self.assertEqual( _lowercase , [ [{'''generated_text''': ANY(_lowercase )}, {'''generated_text''': ANY(_lowercase )}], [{'''generated_text''': ANY(_lowercase )}, {'''generated_text''': ANY(_lowercase )}], ] , ) with self.assertRaises(_lowercase ): generator(4 ) @require_torch def _lowerCamelCase ( self ): __a : Union[str, Any] = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' ) # do_sample=False necessary for reproducibility __a : List[str] = generator('''Something there''' , do_sample=_lowercase ) self.assertEqual(_lowercase , [{'''generated_text''': ''''''}] ) __a : Dict = 3 __a : Optional[Any] = generator( '''Something there''' , num_return_sequences=_lowercase , num_beams=_lowercase , ) __a : Dict = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_lowercase , _lowercase ) __a : Optional[Any] = generator('''This is a test''' , do_sample=_lowercase , num_return_sequences=2 , return_tensors=_lowercase ) self.assertEqual( _lowercase , [ {'''generated_token_ids''': ANY(torch.Tensor )}, {'''generated_token_ids''': ANY(torch.Tensor )}, ] , ) __a : List[Any] = generator.model.config.eos_token_id __a : Optional[Any] = """<pad>""" __a : Optional[int] = generator( ['''This is a test''', '''This is a second test'''] , do_sample=_lowercase , num_return_sequences=2 , batch_size=2 , return_tensors=_lowercase , ) self.assertEqual( _lowercase , [ [ {'''generated_token_ids''': ANY(torch.Tensor )}, {'''generated_token_ids''': ANY(torch.Tensor )}, ], [ {'''generated_token_ids''': ANY(torch.Tensor )}, {'''generated_token_ids''': ANY(torch.Tensor )}, ], ] , ) @require_tf def _lowerCamelCase ( self ): __a : Tuple = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' ) # do_sample=False necessary for reproducibility __a : List[Any] = generator('''Something there''' , do_sample=_lowercase ) self.assertEqual(_lowercase , [{'''generated_text''': ''''''}] )
160
import argparse import os import sys from unittest.mock import patch import pytorch_lightning as pl import timeout_decorator import torch from distillation import SummarizationDistiller, distill_main from finetune import SummarizationModule, main from transformers import MarianMTModel from transformers.file_utils import cached_path from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow from utils import load_json __snake_case : Optional[int] = """sshleifer/mar_enro_6_3_student""" class A__(a_ ): """simple docstring""" def UpperCamelCase__ ( self ) -> Tuple: super().setUp() a_ : Union[str, Any] = cached_path( """https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz""" , extract_compressed_file=_lowercase , ) a_ : Union[str, Any] = F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k''' @slow @require_torch_gpu def UpperCamelCase__ ( self ) -> Tuple: MarianMTModel.from_pretrained(_lowercase ) @slow @require_torch_gpu def UpperCamelCase__ ( self ) -> int: a_ : Any = { """$MAX_LEN""": 64, """$BS""": 64, """$GAS""": 1, """$ENRO_DIR""": self.data_dir, """facebook/mbart-large-cc25""": MARIAN_MODEL, # "val_check_interval=0.25": "val_check_interval=1.0", """--learning_rate=3e-5""": """--learning_rate 3e-4""", """--num_train_epochs 6""": """--num_train_epochs 1""", } # Clean up bash script a_ : List[str] = (self.test_file_dir / """train_mbart_cc25_enro.sh""").open().read().split("""finetune.py""" )[1].strip() a_ : Dict = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" ) for k, v in env_vars_to_replace.items(): a_ : Optional[int] = bash_script.replace(_lowercase , str(_lowercase ) ) a_ : int = self.get_auto_remove_tmp_dir() # bash_script = bash_script.replace("--fp16 ", "") a_ : Dict = F''' --output_dir {output_dir} --tokenizer_name Helsinki-NLP/opus-mt-en-ro --sortish_sampler --do_predict --gpus 1 --freeze_encoder --n_train 40000 --n_val 500 --n_test 500 --fp16_opt_level O1 --num_sanity_val_steps 0 --eval_beams 2 '''.split() # XXX: args.gpus > 1 : handle multi_gpu in the future a_ : Union[str, Any] = ["""finetune.py"""] + bash_script.split() + args with patch.object(_lowercase , """argv""" , _lowercase ): a_ : Optional[Any] = argparse.ArgumentParser() a_ : Tuple = pl.Trainer.add_argparse_args(_lowercase ) a_ : Any = SummarizationModule.add_model_specific_args(_lowercase , os.getcwd() ) a_ : str = parser.parse_args() a_ : Union[str, Any] = main(_lowercase ) # Check metrics a_ : Any = load_json(model.metrics_save_path ) a_ : List[Any] = metrics["""val"""][0] a_ : Union[str, Any] = metrics["""val"""][-1] self.assertEqual(len(metrics["""val"""] ) , (args.max_epochs / args.val_check_interval) ) assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , _lowercase ) self.assertGreater(last_step_stats["""val_avg_gen_time"""] , 0.0_1 ) # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?) self.assertLessEqual(last_step_stats["""val_avg_gen_time"""] , 1.0 ) # test learning requirements: # 1. BLEU improves over the course of training by more than 2 pts self.assertGreater(last_step_stats["""val_avg_bleu"""] - first_step_stats["""val_avg_bleu"""] , 2 ) # 2. BLEU finishes above 17 self.assertGreater(last_step_stats["""val_avg_bleu"""] , 17 ) # 3. test BLEU and val BLEU within ~1.1 pt. self.assertLess(abs(metrics["""val"""][-1]["""val_avg_bleu"""] - metrics["""test"""][-1]["""test_avg_bleu"""] ) , 1.1 ) # check lightning ckpt can be loaded and has a reasonable statedict a_ : Optional[Any] = os.listdir(_lowercase ) a_ : Dict = [x for x in contents if x.endswith(""".ckpt""" )][0] a_ : str = os.path.join(args.output_dir , _lowercase ) a_ : Any = torch.load(_lowercase , map_location="""cpu""" ) a_ : Union[str, Any] = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight""" assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: a_ : List[Any] = {os.path.basename(_lowercase ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics["""test"""] ) == 1 class A__(a_ ): """simple docstring""" @timeout_decorator.timeout(600 ) @slow @require_torch_gpu def UpperCamelCase__ ( self ) -> Optional[Any]: a_ : Tuple = F'''{self.test_file_dir_str}/test_data/wmt_en_ro''' a_ : str = { """--fp16_opt_level=O1""": """""", """$MAX_LEN""": 128, """$BS""": 16, """$GAS""": 1, """$ENRO_DIR""": data_dir, """$m""": """sshleifer/student_marian_en_ro_6_1""", """val_check_interval=0.25""": """val_check_interval=1.0""", } # Clean up bash script a_ : Union[str, Any] = ( (self.test_file_dir / """distil_marian_no_teacher.sh""").open().read().split("""distillation.py""" )[1].strip() ) a_ : Union[str, Any] = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" ) a_ : Any = bash_script.replace("""--fp16 """ , """ """ ) for k, v in env_vars_to_replace.items(): a_ : Dict = bash_script.replace(_lowercase , str(_lowercase ) ) a_ : int = self.get_auto_remove_tmp_dir() a_ : Optional[Any] = bash_script.replace("""--fp16""" , """""" ) a_ : List[str] = 6 a_ : str = ( ["""distillation.py"""] + bash_script.split() + [ F'''--output_dir={output_dir}''', """--gpus=1""", """--learning_rate=1e-3""", F'''--num_train_epochs={epochs}''', """--warmup_steps=10""", """--val_check_interval=1.0""", """--do_predict""", ] ) with patch.object(_lowercase , """argv""" , _lowercase ): a_ : int = argparse.ArgumentParser() a_ : Any = pl.Trainer.add_argparse_args(_lowercase ) a_ : str = SummarizationDistiller.add_model_specific_args(_lowercase , os.getcwd() ) a_ : Any = parser.parse_args() # assert args.gpus == gpus THIS BREAKS for multi_gpu a_ : Dict = distill_main(_lowercase ) # Check metrics a_ : Any = load_json(model.metrics_save_path ) a_ : int = metrics["""val"""][0] a_ : Union[str, Any] = metrics["""val"""][-1] assert len(metrics["""val"""] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check assert last_step_stats["val_avg_gen_time"] >= 0.0_1 assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved. assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , _lowercase ) # check lightning ckpt can be loaded and has a reasonable statedict a_ : Dict = os.listdir(_lowercase ) a_ : List[Any] = [x for x in contents if x.endswith(""".ckpt""" )][0] a_ : int = os.path.join(args.output_dir , _lowercase ) a_ : Union[str, Any] = torch.load(_lowercase , map_location="""cpu""" ) a_ : List[str] = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight""" assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: a_ : List[str] = {os.path.basename(_lowercase ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics["""test"""] ) == 1
248
0
from __future__ import annotations def UpperCamelCase__( UpperCamelCase__ : list )->float: if not nums: raise ValueError('''List is empty''' ) return sum(UpperCamelCase__ ) / len(UpperCamelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
39
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ): __SCREAMING_SNAKE_CASE = ( '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.''' '''It takes two arguments named `image` which should be the original image, and `label` which should be a text ''' '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.''' ) __SCREAMING_SNAKE_CASE = '''CIDAS/clipseg-rd64-refined''' __SCREAMING_SNAKE_CASE = '''image_segmenter''' __SCREAMING_SNAKE_CASE = CLIPSegForImageSegmentation __SCREAMING_SNAKE_CASE = ['''image''', '''text'''] __SCREAMING_SNAKE_CASE = ['''image'''] def __init__( self,*__lowerCamelCase,**__lowerCamelCase ): requires_backends(self,['''vision'''] ) super().__init__(*__lowerCamelCase,**__lowerCamelCase ) def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ): return self.pre_processor(text=[label],images=[image],padding=__lowerCamelCase,return_tensors='''pt''' ) def UpperCamelCase ( self,__lowerCamelCase ): with torch.no_grad(): A__ = self.model(**__lowerCamelCase ).logits return logits def UpperCamelCase ( self,__lowerCamelCase ): A__ = outputs.cpu().detach().numpy() A__ = 0 A__ = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
39
1
'''simple docstring''' import sys from collections import defaultdict class __A : def __init__(self : Optional[Any] ): UpperCAmelCase_ = [] def _lowercase (self : Union[str, Any] , __a : List[Any] ): return self.node_position[vertex] def _lowercase (self : Union[str, Any] , __a : Optional[Any] , __a : Dict ): UpperCAmelCase_ = pos def _lowercase (self : str , __a : str , __a : Dict , __a : Optional[Any] , __a : str ): if start > size // 2 - 1: return else: if 2 * start + 2 >= size: UpperCAmelCase_ = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: UpperCAmelCase_ = 2 * start + 1 else: UpperCAmelCase_ = 2 * start + 2 if heap[smallest_child] < heap[start]: UpperCAmelCase_ = heap[smallest_child], positions[smallest_child] UpperCAmelCase_ = ( heap[start], positions[start], ) UpperCAmelCase_ = temp, tempa UpperCAmelCase_ = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , a_ ) self.top_to_bottom(a_ , a_ , a_ , a_ ) def _lowercase (self : Any , __a : Dict , __a : Union[str, Any] , __a : Optional[Any] , __a : List[str] ): UpperCAmelCase_ = position[index] while index != 0: UpperCAmelCase_ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: UpperCAmelCase_ = heap[parent] UpperCAmelCase_ = position[parent] self.set_position(position[parent] , a_ ) else: UpperCAmelCase_ = val UpperCAmelCase_ = temp self.set_position(a_ , a_ ) break UpperCAmelCase_ = parent else: UpperCAmelCase_ = val UpperCAmelCase_ = temp self.set_position(a_ , 0 ) def _lowercase (self : List[str] , __a : Optional[int] , __a : int ): UpperCAmelCase_ = len(a_ ) // 2 - 1 for i in range(a_ , -1 , -1 ): self.top_to_bottom(a_ , a_ , len(a_ ) , a_ ) def _lowercase (self : List[str] , __a : Dict , __a : str ): UpperCAmelCase_ = positions[0] UpperCAmelCase_ = sys.maxsize self.top_to_bottom(a_ , 0 , len(a_ ) , a_ ) return temp def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ = Heap() UpperCAmelCase_ = [0] * len(_snake_case ) UpperCAmelCase_ = [-1] * len(_snake_case ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph UpperCAmelCase_ = [] # Heap of Distance of vertices from their neighboring vertex UpperCAmelCase_ = [] for vertex in range(len(_snake_case ) ): distance_tv.append(sys.maxsize ) positions.append(_snake_case ) heap.node_position.append(_snake_case ) UpperCAmelCase_ = [] UpperCAmelCase_ = 1 UpperCAmelCase_ = sys.maxsize for neighbor, distance in adjacency_list[0]: UpperCAmelCase_ = 0 UpperCAmelCase_ = distance heap.heapify(_snake_case , _snake_case ) for _ in range(1 , len(_snake_case ) ): UpperCAmelCase_ = heap.delete_minimum(_snake_case , _snake_case ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) UpperCAmelCase_ = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(_snake_case )] ): UpperCAmelCase_ = distance heap.bottom_to_top( _snake_case , heap.get_position(_snake_case ) , _snake_case , _snake_case ) UpperCAmelCase_ = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > SCREAMING_SNAKE_CASE_: Tuple =int(input('Enter number of edges: ').strip()) SCREAMING_SNAKE_CASE_: List[Any] =defaultdict(list) for _ in range(edges_number): SCREAMING_SNAKE_CASE_: Optional[Any] =[int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
1
"""simple docstring""" import math def lowercase ( _snake_case : int ) ->bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_snake_case ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase ( _snake_case : float = 0.1 ) ->int: """simple docstring""" __snake_case : Tuple = 3 __snake_case : Any = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ): primes += is_prime(_snake_case ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
102
0
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=lowerCamelCase__ ) class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ): """simple docstring""" lowercase : str = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True} ) lowercase : ClassVar[Features] = Features({'text': Value('string' )} ) lowercase : ClassVar[Features] = Features({'labels': ClassLabel} ) lowercase : str = "text" lowercase : str = "labels" def __lowerCamelCase ( self , __UpperCamelCase ) -> List[str]: '''simple docstring''' if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , __UpperCamelCase ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) __UpperCamelCase : int = copy.deepcopy(self ) __UpperCamelCase : List[Any] = self.label_schema.copy() __UpperCamelCase : Union[str, Any] = features[self.label_column] __UpperCamelCase : Optional[Any] = label_schema return task_template @property def __lowerCamelCase ( self ) -> Dict[str, str]: '''simple docstring''' return { self.text_column: "text", self.label_column: "labels", }
171
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase : Union[str, Any] = { "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"], "feature_extraction_mctct": ["MCTCTFeatureExtractor"], "processing_mctct": ["MCTCTProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Tuple = [ "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST", "MCTCTForCTC", "MCTCTModel", "MCTCTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys lowercase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
171
1
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> list: if len(a_ ) <= 1: return lst lowerCamelCase__ : Dict = 1 while i < len(a_ ): if lst[i - 1] <= lst[i]: i += 1 else: lowerCamelCase__ , lowerCamelCase__ : Dict = lst[i], lst[i - 1] i -= 1 if i == 0: lowerCamelCase__ : Tuple = 1 return lst if __name__ == "__main__": _UpperCAmelCase : List[Any] = input("""Enter numbers separated by a comma:\n""").strip() _UpperCAmelCase : List[Any] = [int(item) for item in user_input.split(""",""")] print(gnome_sort(unsorted))
50
SCREAMING_SNAKE_CASE :Any = 256 # Modulus to hash a string SCREAMING_SNAKE_CASE :Union[str, Any] = 100_0003 def UpperCAmelCase ( a_ , a_ ) -> bool: """simple docstring""" __A = len(a_ ) __A = len(a_ ) if p_len > t_len: return False __A = 0 __A = 0 __A = 1 # Calculating the hash of pattern and substring of text for i in range(a_ ): __A = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus __A = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue __A = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash __A = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def UpperCAmelCase ( ) -> None: """simple docstring""" __A = "abc1abc12" __A = "alskfjaldsabc1abc1abc12k23adsfabcabc" __A = "alskfjaldsk23adsfabcabc" assert rabin_karp(a_ , a_ ) and not rabin_karp(a_ , a_ ) # Test 2) __A = "ABABX" __A = "ABABZABABYABABX" assert rabin_karp(a_ , a_ ) # Test 3) __A = "AAAB" __A = "ABAAAAAB" assert rabin_karp(a_ , a_ ) # Test 4) __A = "abcdabcy" __A = "abcxabcdabxabcdabcdabcy" assert rabin_karp(a_ , a_ ) # Test 5) __A = "Lü" __A = "Lüsai" assert rabin_karp(a_ , a_ ) __A = "Lue" assert not rabin_karp(a_ , a_ ) print("Success." ) if __name__ == "__main__": test_rabin_karp()
15
0
"""simple docstring""" import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class _lowerCAmelCase : def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]: '''simple docstring''' if dst_width < 0 or dst_height < 0: raise ValueError("Destination width/height should be > 0" ) snake_case : List[Any] = img snake_case : Dict = img.shape[1] snake_case : Union[str, Any] = img.shape[0] snake_case : Optional[Any] = dst_width snake_case : List[Any] = dst_height snake_case : int = self.src_w / self.dst_w snake_case : Union[str, Any] = self.src_h / self.dst_h snake_case : Dict = ( np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255 ) def lowerCamelCase ( self ) -> Dict: '''simple docstring''' for i in range(self.dst_h ): for j in range(self.dst_w ): snake_case : int = self.img[self.get_y(UpperCamelCase__ )][self.get_x(UpperCamelCase__ )] def lowerCamelCase ( self , UpperCamelCase__ ) -> int: '''simple docstring''' return int(self.ratio_x * x ) def lowerCamelCase ( self , UpperCamelCase__ ) -> int: '''simple docstring''' return int(self.ratio_y * y ) if __name__ == "__main__": __snake_case , __snake_case = 800, 600 __snake_case = imread("""image_data/lena.jpg""", 1) __snake_case = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( F'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output ) waitKey(0) destroyAllWindows()
352
"""simple docstring""" def __lowerCAmelCase ( lowercase : int ) -> int: """simple docstring""" if not isinstance(lowercase , lowercase ): raise ValueError("Input must be an integer" ) if input_num <= 0: raise ValueError("Input must be positive" ) return sum( divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
112
0
"""simple docstring""" from __future__ import annotations def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Tuple: '''simple docstring''' print(F'''Vertex\tShortest Distance from vertex {src}''' ) for i, d in enumerate(__snake_case ): print(F'''{i}\t\t{d}''' ) def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int: '''simple docstring''' for j in range(__snake_case ): lowercase_ , lowercase_ , lowercase_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[float]: '''simple docstring''' lowercase_ = [float("""inf""" )] * vertex_count lowercase_ = 0.0 for _ in range(vertex_count - 1 ): for j in range(__snake_case ): lowercase_ , lowercase_ , lowercase_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: lowercase_ = distance[u] + w lowercase_ = check_negative_cycle(__snake_case , __snake_case , __snake_case ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase : List[Any] = int(input("Enter number of vertices: ").strip()) UpperCAmelCase : Any = int(input("Enter number of edges: ").strip()) UpperCAmelCase : int = [{} for _ in range(E)] for i in range(E): print("Edge ", i + 1) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = ( int(x) for x in input("Enter source, destination, weight: ").strip().split(" ") ) UpperCAmelCase : Optional[int] = {"src": src, "dst": dest, "weight": weight} UpperCAmelCase : Tuple = int(input("\nEnter shortest path source:").strip()) UpperCAmelCase : Optional[int] = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
136
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class __A ( lowerCAmelCase ): '''simple docstring''' lowerCAmelCase_ = """Speech2TextFeatureExtractor""" lowerCAmelCase_ = """Speech2TextTokenizer""" def __init__( self , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' super().__init__(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = self.feature_extractor lowerCamelCase__ = False def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) lowerCamelCase__ = kwargs.pop('''raw_speech''' ) else: lowerCamelCase__ = kwargs.pop('''audio''' , __lowerCAmelCase ) lowerCamelCase__ = kwargs.pop('''sampling_rate''' , __lowerCAmelCase ) lowerCamelCase__ = kwargs.pop('''text''' , __lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: lowerCamelCase__ = args[0] lowerCamelCase__ = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: lowerCamelCase__ = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase ) if text is not None: lowerCamelCase__ = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase ) if text is None: return inputs elif audio is None: return encodings else: lowerCamelCase__ = encodings['''input_ids'''] return inputs def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ): '''simple docstring''' return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ): '''simple docstring''' return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @contextmanager def __lowerCamelCase ( self ): '''simple docstring''' warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) lowerCamelCase__ = True lowerCamelCase__ = self.tokenizer yield lowerCamelCase__ = self.feature_extractor lowerCamelCase__ = False
209
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCamelCase__ = { '''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''], '''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''], '''processing_wav2vec2''': ['''Wav2Vec2Processor'''], '''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ '''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Wav2Vec2ForAudioFrameClassification''', '''Wav2Vec2ForCTC''', '''Wav2Vec2ForMaskedLM''', '''Wav2Vec2ForPreTraining''', '''Wav2Vec2ForSequenceClassification''', '''Wav2Vec2ForXVector''', '''Wav2Vec2Model''', '''Wav2Vec2PreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ '''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFWav2Vec2ForCTC''', '''TFWav2Vec2Model''', '''TFWav2Vec2PreTrainedModel''', '''TFWav2Vec2ForSequenceClassification''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ '''FlaxWav2Vec2ForCTC''', '''FlaxWav2Vec2ForPreTraining''', '''FlaxWav2Vec2Model''', '''FlaxWav2Vec2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
352
'''simple docstring''' class lowerCamelCase_ : def __init__( self : Union[str, Any] , _A : int ): '''simple docstring''' UpperCAmelCase__ : str = n UpperCAmelCase__ : Union[str, Any] = [None] * self.n UpperCAmelCase__ : Tuple = 0 # index of the first element UpperCAmelCase__ : int = 0 UpperCAmelCase__ : int = 0 def __len__( self : Optional[Any] ): '''simple docstring''' return self.size def lowercase_ ( self : Dict ): '''simple docstring''' return self.size == 0 def lowercase_ ( self : List[str] ): '''simple docstring''' return False if self.is_empty() else self.array[self.front] def lowercase_ ( self : List[Any] , _A : int ): '''simple docstring''' if self.size >= self.n: raise Exception('''QUEUE IS FULL''' ) UpperCAmelCase__ : str = data UpperCAmelCase__ : Optional[Any] = (self.rear + 1) % self.n self.size += 1 return self def lowercase_ ( self : List[Any] ): '''simple docstring''' if self.size == 0: raise Exception('''UNDERFLOW''' ) UpperCAmelCase__ : Any = self.array[self.front] UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : Tuple = (self.front + 1) % self.n self.size -= 1 return temp
299
0
import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class __lowerCamelCase ( snake_case__): """simple docstring""" def __init__( self , *UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ): """simple docstring""" super().__init__(*UpperCAmelCase , **UpperCAmelCase ) _UpperCAmelCase = eval_examples _UpperCAmelCase = post_process_function def UpperCamelCase ( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase = "eval" ): """simple docstring""" _UpperCAmelCase = self.eval_dataset if eval_dataset is None else eval_dataset _UpperCAmelCase = self.get_eval_dataloader(UpperCAmelCase ) _UpperCAmelCase = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _UpperCAmelCase = self.compute_metrics _UpperCAmelCase = None _UpperCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _UpperCAmelCase = time.time() try: _UpperCAmelCase = eval_loop( UpperCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase , metric_key_prefix=UpperCAmelCase , ) finally: _UpperCAmelCase = compute_metrics _UpperCAmelCase = self.args.eval_batch_size * self.args.world_size if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( UpperCAmelCase , UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _UpperCAmelCase = self.post_process_function(UpperCAmelCase , UpperCAmelCase , output.predictions ) _UpperCAmelCase = self.compute_metrics(UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): _UpperCAmelCase = metrics.pop(UpperCAmelCase ) metrics.update(output.metrics ) else: _UpperCAmelCase = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(UpperCAmelCase ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _UpperCAmelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCAmelCase ) return metrics def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase = "test" ): """simple docstring""" _UpperCAmelCase = self.get_test_dataloader(UpperCAmelCase ) # Temporarily disable metric computation, we will do it in the loop here. _UpperCAmelCase = self.compute_metrics _UpperCAmelCase = None _UpperCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _UpperCAmelCase = time.time() try: _UpperCAmelCase = eval_loop( UpperCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase , metric_key_prefix=UpperCAmelCase , ) finally: _UpperCAmelCase = compute_metrics _UpperCAmelCase = self.args.eval_batch_size * self.args.world_size if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( UpperCAmelCase , UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output _UpperCAmelCase = self.post_process_function(UpperCAmelCase , UpperCAmelCase , output.predictions , 'predict' ) _UpperCAmelCase = self.compute_metrics(UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): _UpperCAmelCase = metrics.pop(UpperCAmelCase ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCAmelCase )
39
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class __lowerCamelCase : """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=16 , UpperCAmelCase=[1, 2, 1] , UpperCAmelCase=[2, 2, 4] , UpperCAmelCase=2 , UpperCAmelCase=2.0 , UpperCAmelCase=True , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=10 , UpperCAmelCase=8 , UpperCAmelCase=["stage1", "stage2", "stage3"] , UpperCAmelCase=[1, 2, 3] , ): """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = patch_norm _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = is_training _UpperCAmelCase = scope _UpperCAmelCase = use_labels _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = encoder_stride _UpperCAmelCase = out_features _UpperCAmelCase = out_indices def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def UpperCamelCase ( self ): """simple docstring""" return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): """simple docstring""" _UpperCAmelCase = MaskFormerSwinModel(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _UpperCAmelCase = model(UpperCAmelCase ) _UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): """simple docstring""" _UpperCAmelCase = MaskFormerSwinBackbone(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _UpperCAmelCase = model(UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(UpperCAmelCase ): _UpperCAmelCase = ['stem'] _UpperCAmelCase = MaskFormerSwinBackbone(config=UpperCAmelCase ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase): """simple docstring""" UpperCamelCase__ = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) UpperCamelCase__ = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = MaskFormerSwinModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=UpperCAmelCase , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def UpperCamelCase ( self ): """simple docstring""" pass def UpperCamelCase ( self ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase ( self ): """simple docstring""" return def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*UpperCAmelCase ) @unittest.skip('Swin does not use inputs_embeds' ) def UpperCamelCase ( self ): """simple docstring""" pass @unittest.skip('Swin does not support feedforward chunking' ) def UpperCamelCase ( self ): """simple docstring""" pass def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(UpperCAmelCase ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCAmelCase ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def UpperCamelCase ( self ): """simple docstring""" pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def UpperCamelCase ( self ): """simple docstring""" pass def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): """simple docstring""" _UpperCAmelCase = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase ) # Swin has a different seq_length _UpperCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 3 _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _UpperCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def UpperCamelCase ( self ): """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def UpperCamelCase ( self ): """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def UpperCamelCase ( self ): """simple docstring""" pass def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(UpperCAmelCase ): _UpperCAmelCase = 0 return t def check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase={} ): with torch.no_grad(): _UpperCAmelCase = model(**UpperCAmelCase , return_dict=UpperCAmelCase , **UpperCAmelCase ) _UpperCAmelCase = model(**UpperCAmelCase , return_dict=UpperCAmelCase , **UpperCAmelCase ).to_tuple() def recursive_check(UpperCAmelCase , UpperCAmelCase ): if isinstance(UpperCAmelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(UpperCAmelCase , UpperCAmelCase ): recursive_check(UpperCAmelCase , UpperCAmelCase ) elif isinstance(UpperCAmelCase , UpperCAmelCase ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(UpperCAmelCase , UpperCAmelCase ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(UpperCAmelCase ) , set_nan_tensor_to_zero(UpperCAmelCase ) , atol=1e-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' F""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:""" F""" {torch.isnan(UpperCAmelCase ).any()} and `inf`: {torch.isinf(UpperCAmelCase )}. Dict has""" F""" `nan`: {torch.isnan(UpperCAmelCase ).any()} and `inf`: {torch.isinf(UpperCAmelCase )}.""" ) , ) recursive_check(UpperCAmelCase , UpperCAmelCase ) for model_class in self.all_model_classes: _UpperCAmelCase = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) _UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) _UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase ) _UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase ) check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) _UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) _UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , {'output_hidden_states': True} ) _UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase ) _UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase ) check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , {'output_hidden_states': True} ) @require_torch class __lowerCamelCase ( unittest.TestCase , snake_case__): """simple docstring""" UpperCamelCase__ = (MaskFormerSwinBackbone,) if is_torch_available() else () UpperCamelCase__ = MaskFormerSwinConfig def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = MaskFormerSwinModelTester(self ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: _UpperCAmelCase = backbone_class(UpperCAmelCase ) backbone.to(UpperCAmelCase ) backbone.eval() _UpperCAmelCase = backbone(**UpperCAmelCase ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , UpperCAmelCase ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True _UpperCAmelCase = backbone(**UpperCAmelCase , output_hidden_states=UpperCAmelCase ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: _UpperCAmelCase = backbone(**UpperCAmelCase , output_attentions=UpperCAmelCase ) self.assertIsNotNone(outputs.attentions )
39
1
import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class SCREAMING_SNAKE_CASE_ : def __init__( self : List[Any] , _A : Optional[int] , _A : Tuple=13 , _A : Union[str, Any]=7 , _A : str=True , _A : Dict=True , _A : str=True , _A : Union[str, Any]=True , _A : str=99 , _A : List[Any]=64 , _A : List[str]=5 , _A : Any=4 , _A : List[Any]=37 , _A : Optional[Any]="gelu" , _A : Union[str, Any]=0.1 , _A : Tuple=0.1 , _A : Optional[Any]=512 , _A : Dict=16 , _A : Tuple=2 , _A : int=0.0_2 , _A : int=3 , _A : Tuple=4 , _A : Optional[int]=None , ) -> Optional[int]: """simple docstring""" snake_case_ : Optional[int] = parent snake_case_ : Optional[Any] = batch_size snake_case_ : Optional[int] = seq_length snake_case_ : str = is_training snake_case_ : Optional[int] = use_input_mask snake_case_ : Any = use_token_type_ids snake_case_ : Union[str, Any] = use_labels snake_case_ : Union[str, Any] = vocab_size snake_case_ : Union[str, Any] = hidden_size snake_case_ : Tuple = num_hidden_layers snake_case_ : Dict = num_attention_heads snake_case_ : str = intermediate_size snake_case_ : Optional[Any] = hidden_act snake_case_ : Optional[Any] = hidden_dropout_prob snake_case_ : List[str] = attention_probs_dropout_prob snake_case_ : Dict = max_position_embeddings snake_case_ : Any = type_vocab_size snake_case_ : Tuple = type_sequence_label_size snake_case_ : str = initializer_range snake_case_ : int = num_labels snake_case_ : List[Any] = num_choices snake_case_ : Dict = scope snake_case_ : Optional[Any] = vocab_size - 1 def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ : int = None if self.use_input_mask: snake_case_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ : Dict = None if self.use_labels: snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ : str = self.get_config() return config, input_ids, input_mask, token_labels def UpperCAmelCase_ ( self : int ) -> str: """simple docstring""" return GPTNeoXConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , ) def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]: """simple docstring""" snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ : str = self.prepare_config_and_inputs() snake_case_ : Tuple = True return config, input_ids, input_mask, token_labels def UpperCAmelCase_ ( self : str , _A : Any , _A : List[str] , _A : Optional[Any] ) -> Optional[int]: """simple docstring""" snake_case_ : Any = GPTNeoXModel(config=_A ) model.to(_A ) model.eval() snake_case_ : List[Any] = model(_A , attention_mask=_A ) snake_case_ : List[str] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self : Union[str, Any] , _A : Dict , _A : List[Any] , _A : Optional[int] ) -> Union[str, Any]: """simple docstring""" snake_case_ : List[str] = True snake_case_ : Optional[Any] = GPTNeoXModel(_A ) model.to(_A ) model.eval() snake_case_ : Optional[int] = model(_A , attention_mask=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self : str , _A : Optional[int] , _A : Any , _A : Optional[Any] , _A : Optional[int] ) -> Optional[Any]: """simple docstring""" snake_case_ : Tuple = GPTNeoXForCausalLM(config=_A ) model.to(_A ) model.eval() snake_case_ : int = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self : str , _A : int , _A : List[Any] , _A : List[Any] , _A : Union[str, Any] ) -> Any: """simple docstring""" snake_case_ : int = self.num_labels snake_case_ : int = GPTNeoXForQuestionAnswering(_A ) model.to(_A ) model.eval() snake_case_ : List[Any] = model(_A , attention_mask=_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase_ ( self : List[str] , _A : Optional[Any] , _A : Dict , _A : Dict , _A : Any ) -> int: """simple docstring""" snake_case_ : str = self.num_labels snake_case_ : Tuple = GPTNeoXForSequenceClassification(_A ) model.to(_A ) model.eval() snake_case_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ : Any = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ ( self : Dict , _A : List[str] , _A : Optional[Any] , _A : Union[str, Any] , _A : Any ) -> Optional[int]: """simple docstring""" snake_case_ : Tuple = self.num_labels snake_case_ : Tuple = GPTNeoXForTokenClassification(_A ) model.to(_A ) model.eval() snake_case_ : List[str] = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self : List[Any] , _A : List[Any] , _A : Optional[Any] , _A : Union[str, Any] ) -> int: """simple docstring""" snake_case_ : List[Any] = True snake_case_ : Dict = GPTNeoXForCausalLM(config=_A ) model.to(_A ) model.eval() # first forward pass snake_case_ : List[str] = model(_A , attention_mask=_A , use_cache=_A ) snake_case_ : Optional[int] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids snake_case_ : str = ids_tensor((self.batch_size, 3) , config.vocab_size ) snake_case_ : Union[str, Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and snake_case_ : str = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case_ : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 ) snake_case_ : Any = model(_A , attention_mask=_A , output_hidden_states=_A ) snake_case_ : Union[str, Any] = output_from_no_past['hidden_states'][0] snake_case_ : str = model( _A , attention_mask=_A , past_key_values=_A , output_hidden_states=_A , )['hidden_states'][0] # select random slice snake_case_ : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case_ : Dict = output_from_no_past[:, -3:, random_slice_idx].detach() snake_case_ : str = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_A , _A , atol=1E-3 ) ) def UpperCAmelCase_ ( self : str ) -> List[Any]: """simple docstring""" snake_case_ : Dict = self.prepare_config_and_inputs() snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ : Union[str, Any] = config_and_inputs snake_case_ : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ): __magic_name__: Optional[int] = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) __magic_name__: str = (GPTNeoXForCausalLM,) if is_torch_available() else () __magic_name__: Optional[Any] = ( { "feature-extraction": GPTNeoXModel, "question-answering": GPTNeoXForQuestionAnswering, "text-classification": GPTNeoXForSequenceClassification, "text-generation": GPTNeoXForCausalLM, "token-classification": GPTNeoXForTokenClassification, "zero-shot": GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) __magic_name__: Any = False __magic_name__: Union[str, Any] = False __magic_name__: Dict = False __magic_name__: Any = False def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict: """simple docstring""" snake_case_ : List[Any] = GPTNeoXModelTester(self ) snake_case_ : str = ConfigTester(self , config_class=_A , hidden_size=64 , num_attention_heads=8 ) def UpperCAmelCase_ ( self : Tuple ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any: """simple docstring""" snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(_A , _A , _A ) def UpperCAmelCase_ ( self : Any ) -> str: """simple docstring""" snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(_A , _A , _A ) def UpperCAmelCase_ ( self : Optional[Any] ) -> int: """simple docstring""" snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_decoder() snake_case_ : Any = None self.model_tester.create_and_check_model_as_decoder(_A , _A , _A ) def UpperCAmelCase_ ( self : Optional[int] ) -> str: """simple docstring""" snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(_A , _A , _A ) def UpperCAmelCase_ ( self : Tuple ) -> int: """simple docstring""" snake_case_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*_A ) def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]: """simple docstring""" snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> str: """simple docstring""" snake_case_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def UpperCAmelCase_ ( self : str ) -> str: """simple docstring""" snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A ) @unittest.skip(reason='Feed forward chunking is not implemented' ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" pass @parameterized.expand([('linear',), ('dynamic',)] ) def UpperCAmelCase_ ( self : List[Any] , _A : List[Any] ) -> str: """simple docstring""" snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Optional[Any] = ids_tensor([1, 10] , config.vocab_size ) snake_case_ : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights snake_case_ : Optional[Any] = GPTNeoXModel(_A ) original_model.to(_A ) original_model.eval() snake_case_ : Optional[Any] = original_model(_A ).last_hidden_state snake_case_ : Dict = original_model(_A ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights snake_case_ : Any = {'type': scaling_type, 'factor': 1_0.0} snake_case_ : Tuple = GPTNeoXModel(_A ) scaled_model.to(_A ) scaled_model.eval() snake_case_ : Optional[Any] = scaled_model(_A ).last_hidden_state snake_case_ : List[str] = scaled_model(_A ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_A , _A , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(_A , _A , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_A , _A , atol=1E-5 ) ) @require_torch class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @slow def UpperCAmelCase_ ( self : List[Any] ) -> Tuple: """simple docstring""" snake_case_ : Dict = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' ) for checkpointing in [True, False]: snake_case_ : List[Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(_A ) snake_case_ : List[str] = tokenizer('My favorite food is' , return_tensors='pt' ).to(_A ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 snake_case_ : Union[str, Any] = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure' snake_case_ : int = model.generate(**_A , do_sample=_A , max_new_tokens=20 ) snake_case_ : Dict = tokenizer.batch_decode(_A )[0] self.assertEqual(_A , _A )
88
from decimal import Decimal, getcontext from math import ceil, factorial def SCREAMING_SNAKE_CASE__ ( __a ): if not isinstance(__a , __a ): raise TypeError('Undefined for non-integers' ) elif precision < 1: raise ValueError('Undefined for non-natural numbers' ) snake_case_ : Dict = precision snake_case_ : str = ceil(precision / 14 ) snake_case_ : str = 42_68_80 * Decimal(1_00_05 ).sqrt() snake_case_ : Tuple = 1 snake_case_ : int = 13_59_14_09 snake_case_ : Tuple = Decimal(__a ) for k in range(1 , __a ): snake_case_ : List[Any] = factorial(6 * k ) // (factorial(3 * k ) * factorial(__a ) ** 3) linear_term += 5_45_14_01_34 exponential_term *= -26_25_37_41_26_40_76_80_00 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": _SCREAMING_SNAKE_CASE = 50 print(F'''The first {n} digits of pi is: {pi(n)}''')
88
1
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCamelCase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = BioGptTokenizer SCREAMING_SNAKE_CASE = False def _a (self ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase__ : List[str] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] UpperCAmelCase__ : str = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) ) UpperCAmelCase__ : Union[str, Any] = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] UpperCAmelCase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" ) as fp: fp.write(json.dumps(_lowerCamelCase ) ) with open(self.merges_file , """w""" ) as fp: fp.write("""\n""".join(_lowerCamelCase ) ) def _a (self , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Tuple = """lower newer""" UpperCAmelCase__ : int = """lower newer""" return input_text, output_text def _a (self ): """simple docstring""" UpperCAmelCase__ : Optional[int] = BioGptTokenizer(self.vocab_file , self.merges_file ) UpperCAmelCase__ : List[str] = """lower""" UpperCAmelCase__ : Optional[Any] = ["""low""", """er</w>"""] UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) UpperCAmelCase__ : Optional[int] = tokens + ["""<unk>"""] UpperCAmelCase__ : List[str] = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase ) @slow def _a (self ): """simple docstring""" UpperCAmelCase__ : Tuple = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) UpperCAmelCase__ : Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCamelCase ) UpperCAmelCase__ : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCamelCase ) UpperCAmelCase__ : Dict = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase ) UpperCAmelCase__ : str = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
171
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _A = { """configuration_owlvit""": [ """OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OwlViTConfig""", """OwlViTOnnxConfig""", """OwlViTTextConfig""", """OwlViTVisionConfig""", ], """processing_owlvit""": ["""OwlViTProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = ["""OwlViTFeatureExtractor"""] _A = ["""OwlViTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ """OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """OwlViTModel""", """OwlViTPreTrainedModel""", """OwlViTTextModel""", """OwlViTVisionModel""", """OwlViTForObjectDetection""", ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys _A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
171
1
from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : List[str] = logging.get_logger(__name__) A_ : Dict = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class _lowerCAmelCase( UpperCAmelCase_ ): """simple docstring""" a : List[str] ='''cvt''' def __init__( self , _lowerCamelCase=3 , _lowerCamelCase=[7, 3, 3] , _lowerCamelCase=[4, 2, 2] , _lowerCamelCase=[2, 1, 1] , _lowerCamelCase=[6_4, 1_9_2, 3_8_4] , _lowerCamelCase=[1, 3, 6] , _lowerCamelCase=[1, 2, 1_0] , _lowerCamelCase=[4.0, 4.0, 4.0] , _lowerCamelCase=[0.0, 0.0, 0.0] , _lowerCamelCase=[0.0, 0.0, 0.0] , _lowerCamelCase=[0.0, 0.0, 0.1] , _lowerCamelCase=[True, True, True] , _lowerCamelCase=[False, False, True] , _lowerCamelCase=["dw_bn", "dw_bn", "dw_bn"] , _lowerCamelCase=[3, 3, 3] , _lowerCamelCase=[1, 1, 1] , _lowerCamelCase=[2, 2, 2] , _lowerCamelCase=[1, 1, 1] , _lowerCamelCase=[1, 1, 1] , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-12 , **_lowerCamelCase , ): super().__init__(**_lowerCamelCase ) UpperCamelCase_: Tuple = num_channels UpperCamelCase_: Optional[int] = patch_sizes UpperCamelCase_: Union[str, Any] = patch_stride UpperCamelCase_: List[Any] = patch_padding UpperCamelCase_: List[Any] = embed_dim UpperCamelCase_: str = num_heads UpperCamelCase_: Tuple = depth UpperCamelCase_: Dict = mlp_ratio UpperCamelCase_: Union[str, Any] = attention_drop_rate UpperCamelCase_: int = drop_rate UpperCamelCase_: Optional[Any] = drop_path_rate UpperCamelCase_: Optional[int] = qkv_bias UpperCamelCase_: Tuple = cls_token UpperCamelCase_: List[str] = qkv_projection_method UpperCamelCase_: Tuple = kernel_qkv UpperCamelCase_: Dict = padding_kv UpperCamelCase_: Optional[int] = stride_kv UpperCamelCase_: Optional[Any] = padding_q UpperCamelCase_: Dict = stride_q UpperCamelCase_: Union[str, Any] = initializer_range UpperCamelCase_: List[Any] = layer_norm_eps
292
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class _lowerCAmelCase( unittest.TestCase ): """simple docstring""" def _a ( self ): UpperCamelCase_: Any = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] ) UpperCamelCase_: List[str] = get_activation('gelu' ) self.assertTrue(torch.allclose(gelu_python(_lowerCamelCase ) , torch_builtin(_lowerCamelCase ) ) ) self.assertFalse(torch.allclose(gelu_python(_lowerCamelCase ) , gelu_new(_lowerCamelCase ) ) ) def _a ( self ): UpperCamelCase_: Optional[Any] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] ) UpperCamelCase_: Union[str, Any] = get_activation('gelu' ) UpperCamelCase_: int = get_activation('gelu_10' ) UpperCamelCase_: Union[str, Any] = torch_builtin(_lowerCamelCase ) UpperCamelCase_: List[str] = geluaa(_lowerCamelCase ) UpperCamelCase_: Dict = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 ) self.assertTrue(torch.max(_lowerCamelCase ).item() == 1_0.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def _a ( self ): get_activation('gelu' ) get_activation('gelu_10' ) get_activation('gelu_fast' ) get_activation('gelu_new' ) get_activation('gelu_python' ) get_activation('gelu_pytorch_tanh' ) get_activation('linear' ) get_activation('mish' ) get_activation('quick_gelu' ) get_activation('relu' ) get_activation('sigmoid' ) get_activation('silu' ) get_activation('swish' ) get_activation('tanh' ) with self.assertRaises(_lowerCamelCase ): get_activation('bogus' ) with self.assertRaises(_lowerCamelCase ): get_activation(_lowerCamelCase ) def _a ( self ): UpperCamelCase_: str = get_activation('gelu' ) UpperCamelCase_: str = 1 UpperCamelCase_: int = get_activation('gelu' ) self.assertEqual(acta.a , 1 ) with self.assertRaises(_lowerCamelCase ): UpperCamelCase_: Tuple = acta.a
292
1
"""simple docstring""" from __future__ import annotations def _snake_case ( UpperCamelCase : Dict , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Any ): # noqa: E741 while r - l > 1: UpperCAmelCase : int = (l + r) // 2 if v[m] >= key: UpperCAmelCase : Union[str, Any] = m else: UpperCAmelCase : Dict = m # noqa: E741 return r def _snake_case ( UpperCamelCase : list[int] ): if len(UpperCamelCase ) == 0: return 0 UpperCAmelCase : Union[str, Any] = [0] * len(UpperCamelCase ) UpperCAmelCase : Union[str, Any] = 1 UpperCAmelCase : Optional[Any] = v[0] for i in range(1 , len(UpperCamelCase ) ): if v[i] < tail[0]: UpperCAmelCase : List[str] = v[i] elif v[i] > tail[length - 1]: UpperCAmelCase : Dict = v[i] length += 1 else: UpperCAmelCase : Optional[Any] = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
109
'''simple docstring''' def lowerCAmelCase_ ( _lowerCamelCase: list[int] , _lowerCamelCase: str ): __SCREAMING_SNAKE_CASE : str = int(_lowerCamelCase ) # Initialize Result __SCREAMING_SNAKE_CASE : Tuple = [] # Traverse through all denomination for denomination in reversed(_lowerCamelCase ): # Find denominations while int(_lowerCamelCase ) >= int(_lowerCamelCase ): total_value -= int(_lowerCamelCase ) answer.append(_lowerCamelCase ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": UpperCamelCase__ : int = [] UpperCamelCase__ : List[Any] = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): UpperCamelCase__ : Tuple = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(f"Denomination {i}: ").strip())) UpperCamelCase__ : str = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter UpperCamelCase__ : List[Any] = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00] UpperCamelCase__ : str = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(f"Following is minimal change for {value}: ") UpperCamelCase__ : int = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
112
0
def a__ ( UpperCAmelCase : int ) -> bool: if not isinstance(UpperCAmelCase , UpperCAmelCase ): UpperCAmelCase : List[str] = f'''Input value of [number={number}] must be an integer''' raise TypeError(UpperCAmelCase ) if number < 0: return False UpperCAmelCase : List[str] = number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
99
import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(4_2) _lowerCamelCase : List[str] = "bert-base-cased" _lowerCamelCase : str = "fp16" _lowerCamelCase : Optional[int] = "bf16" _lowerCamelCase : List[str] = [FPaa, BFaa] @require_fsdp @require_cuda class __UpperCAmelCase ( lowerCamelCase__ ): def __magic_name__ ( self : Tuple ): super().setUp() UpperCAmelCase : Optional[Any] = dict( ACCELERATE_USE_FSDP='''true''', MASTER_ADDR='''localhost''', MASTER_PORT='''10999''', RANK='''0''', LOCAL_RANK='''0''', WORLD_SIZE='''1''', ) def __magic_name__ ( self : int ): from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(__A ): UpperCAmelCase : List[Any] = self.dist_env.copy() UpperCAmelCase : Union[str, Any] = F'''{i + 1}''' UpperCAmelCase : Union[str, Any] = strategy with mockenv_context(**__A ): UpperCAmelCase : Union[str, Any] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1 ) ) def __magic_name__ ( self : Dict ): from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(__A ): UpperCAmelCase : int = self.dist_env.copy() UpperCAmelCase : Dict = prefetch_policy with mockenv_context(**__A ): UpperCAmelCase : Optional[int] = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1 ) ) def __magic_name__ ( self : Any ): from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(__A ): UpperCAmelCase : Any = self.dist_env.copy() UpperCAmelCase : int = state_dict_type with mockenv_context(**__A ): UpperCAmelCase : str = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def __magic_name__ ( self : int ): UpperCAmelCase : Optional[int] = AutoModel.from_pretrained(__A ) for policy in FSDP_AUTO_WRAP_POLICY: UpperCAmelCase : Any = self.dist_env.copy() UpperCAmelCase : List[Any] = policy if policy == "TRANSFORMER_BASED_WRAP": UpperCAmelCase : Tuple = '''BertLayer''' elif policy == "SIZE_BASED_WRAP": UpperCAmelCase : List[str] = '''2000''' with mockenv_context(**__A ): UpperCAmelCase : Optional[Any] = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__A ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) UpperCAmelCase : List[str] = self.dist_env.copy() UpperCAmelCase : Tuple = '''TRANSFORMER_BASED_WRAP''' UpperCAmelCase : Optional[Any] = '''T5Layer''' with mockenv_context(**__A ): UpperCAmelCase : int = FullyShardedDataParallelPlugin() with self.assertRaises(__A ) as cm: fsdp_plugin.set_auto_wrap_policy(__A ) self.assertTrue('''Could not find the transformer layer class to wrap in the model.''' in str(cm.exception ) ) UpperCAmelCase : List[Any] = self.dist_env.copy() UpperCAmelCase : str = '''SIZE_BASED_WRAP''' UpperCAmelCase : str = '''0''' with mockenv_context(**__A ): UpperCAmelCase : Optional[int] = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__A ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def __magic_name__ ( self : int ): from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: UpperCAmelCase : List[Any] = self.dist_env.copy() UpperCAmelCase : int = mp_dtype with mockenv_context(**__A ): UpperCAmelCase : int = Accelerator() if mp_dtype == "fp16": UpperCAmelCase : Any = torch.floataa elif mp_dtype == "bf16": UpperCAmelCase : Any = torch.bfloataa UpperCAmelCase : Optional[Any] = MixedPrecision(param_dtype=__A, reduce_dtype=__A, buffer_dtype=__A ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, __A ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler, __A ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(__A ) def __magic_name__ ( self : Optional[int] ): from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: UpperCAmelCase : Any = self.dist_env.copy() UpperCAmelCase : int = str(__A ).lower() with mockenv_context(**__A ): UpperCAmelCase : Union[str, Any] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=__A ) ) @require_fsdp @require_multi_gpu @slow class __UpperCAmelCase ( lowerCamelCase__ ): def __magic_name__ ( self : List[Any] ): super().setUp() UpperCAmelCase : int = 0.8_2 UpperCAmelCase : List[str] = [ '''fsdp_shard_grad_op_transformer_based_wrap''', '''fsdp_full_shard_transformer_based_wrap''', ] UpperCAmelCase : int = { '''multi_gpu_fp16''': 3_2_0_0, '''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 2_0_0_0, '''fsdp_full_shard_transformer_based_wrap_fp16''': 1_9_0_0, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } UpperCAmelCase : List[str] = 1_6_0 UpperCAmelCase : Optional[int] = 1_6_0 UpperCAmelCase : Union[str, Any] = inspect.getfile(accelerate.test_utils ) UpperCAmelCase : Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] ) def __magic_name__ ( self : str ): UpperCAmelCase : str = os.path.join(self.test_scripts_folder, '''test_performance.py''' ) UpperCAmelCase : Any = ['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp'''] for config in self.performance_configs: UpperCAmelCase : Union[str, Any] = cmd.copy() for i, strategy in enumerate(__A ): if strategy.lower() in config: cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' ) break if "fp32" in config: cmd_config.append('''--mixed_precision=no''' ) else: cmd_config.append('''--mixed_precision=fp16''' ) if "cpu_offload" in config: cmd_config.append('''--fsdp_offload_params=True''' ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' ) elif policy == "SIZE_BASED_WRAP": cmd_config.append('''--fsdp_min_num_params=2000''' ) cmd_config.extend( [ self.test_file_path, F'''--output_dir={self.tmpdir}''', F'''--performance_lower_bound={self.performance_lower_bound}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A, env=os.environ.copy() ) def __magic_name__ ( self : Tuple ): UpperCAmelCase : Dict = os.path.join(self.test_scripts_folder, '''test_checkpointing.py''' ) UpperCAmelCase : Optional[int] = [ '''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''', '''--mixed_precision=fp16''', '''--fsdp_transformer_layer_cls_to_wrap=BertLayer''', ] for i, strategy in enumerate(__A ): UpperCAmelCase : Optional[Any] = cmd.copy() cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' ) if strategy != "FULL_SHARD": continue UpperCAmelCase : Any = len(__A ) for state_dict_type in FSDP_STATE_DICT_TYPE: UpperCAmelCase : List[Any] = cmd_config[:state_dict_config_index] cmd_config.append(F'''--fsdp_state_dict_type={state_dict_type}''' ) cmd_config.extend( [ self.test_file_path, F'''--output_dir={self.tmpdir}''', '''--partial_train_epoch=1''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A, env=os.environ.copy() ) UpperCAmelCase : Tuple = cmd_config[:-1] UpperCAmelCase : List[str] = os.path.join(self.tmpdir, '''epoch_0''' ) cmd_config.extend( [ F'''--resume_from_checkpoint={resume_from_checkpoint}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A, env=os.environ.copy() ) def __magic_name__ ( self : Any ): UpperCAmelCase : Optional[Any] = os.path.join(self.test_scripts_folder, '''test_peak_memory_usage.py''' ) UpperCAmelCase : str = [ '''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): UpperCAmelCase : List[Any] = cmd.copy() if "fp16" in spec: cmd_config.extend(['''--mixed_precision=fp16'''] ) else: cmd_config.extend(['''--mixed_precision=no'''] ) if "multi_gpu" in spec: continue else: cmd_config.extend(['''--use_fsdp'''] ) for i, strategy in enumerate(__A ): if strategy.lower() in spec: cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' ) break if "cpu_offload" in spec: cmd_config.append('''--fsdp_offload_params=True''' ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' ) elif policy == "SIZE_BASED_WRAP": cmd_config.append('''--fsdp_min_num_params=2000''' ) cmd_config.extend( [ self.test_file_path, F'''--output_dir={self.tmpdir}''', F'''--peak_memory_upper_bound={peak_mem_upper_bound}''', F'''--n_train={self.n_train}''', F'''--n_val={self.n_val}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A, env=os.environ.copy() )
99
1
from collections.abc import Iterable from typing import Any class UpperCamelCase__ : """simple docstring""" def __init__( self , _A = None ) -> Dict: SCREAMING_SNAKE_CASE_ = value SCREAMING_SNAKE_CASE_ = None # Added in order to delete a node easier SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None def __repr__( self ) -> str: from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({F'''{self.value}''': (self.left, self.right)} , indent=1 ) class UpperCamelCase__ : """simple docstring""" def __init__( self , _A = None ) -> Tuple: SCREAMING_SNAKE_CASE_ = root def __str__( self ) -> str: return str(self.root ) def _UpperCamelCase ( self , _A , _A ) -> None: if new_children is not None: # reset its kids SCREAMING_SNAKE_CASE_ = node.parent if node.parent is not None: # reset its parent if self.is_right(_A ): # If it is the right children SCREAMING_SNAKE_CASE_ = new_children else: SCREAMING_SNAKE_CASE_ = new_children else: SCREAMING_SNAKE_CASE_ = new_children def _UpperCamelCase ( self , _A ) -> bool: if node.parent and node.parent.right: return node == node.parent.right return False def _UpperCamelCase ( self ) -> bool: return self.root is None def _UpperCamelCase ( self , _A ) -> None: SCREAMING_SNAKE_CASE_ = Node(_A ) # create a new Node if self.empty(): # if Tree is empty SCREAMING_SNAKE_CASE_ = new_node # set its root else: # Tree is not empty SCREAMING_SNAKE_CASE_ = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: SCREAMING_SNAKE_CASE_ = new_node # We insert the new node in a leaf break else: SCREAMING_SNAKE_CASE_ = parent_node.left else: if parent_node.right is None: SCREAMING_SNAKE_CASE_ = new_node break else: SCREAMING_SNAKE_CASE_ = parent_node.right SCREAMING_SNAKE_CASE_ = parent_node def _UpperCamelCase ( self , *_A ) -> None: for value in values: self.__insert(_A ) def _UpperCamelCase ( self , _A ) -> Node | None: if self.empty(): raise IndexError('''Warning: Tree is empty! please use another.''' ) else: SCREAMING_SNAKE_CASE_ = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: SCREAMING_SNAKE_CASE_ = node.left if value < node.value else node.right return node def _UpperCamelCase ( self , _A = None ) -> Node | None: if node is None: if self.root is None: return None SCREAMING_SNAKE_CASE_ = self.root if not self.empty(): while node.right is not None: SCREAMING_SNAKE_CASE_ = node.right return node def _UpperCamelCase ( self , _A = None ) -> Node | None: if node is None: SCREAMING_SNAKE_CASE_ = self.root if self.root is None: return None if not self.empty(): SCREAMING_SNAKE_CASE_ = self.root while node.left is not None: SCREAMING_SNAKE_CASE_ = node.left return node def _UpperCamelCase ( self , _A ) -> None: SCREAMING_SNAKE_CASE_ = self.search(_A ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(_A , _A ) elif node.left is None: # Has only right children self.__reassign_nodes(_A , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(_A , node.left ) else: SCREAMING_SNAKE_CASE_ = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore SCREAMING_SNAKE_CASE_ = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def _UpperCamelCase ( self , _A ) -> Iterable: if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def _UpperCamelCase ( self , _A=None ) -> Any: if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def _UpperCamelCase ( self , _A , _A ) -> None: if node: self.inorder(_A , node.left ) arr.append(node.value ) self.inorder(_A , node.right ) def _UpperCamelCase ( self , _A , _A ) -> int: SCREAMING_SNAKE_CASE_ = [] self.inorder(_A , _A ) # append all values to list using inorder traversal return arr[k - 1] def A__ ( __lowerCamelCase ): SCREAMING_SNAKE_CASE_ = [] if curr_node is not None: SCREAMING_SNAKE_CASE_ = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def A__ ( ): SCREAMING_SNAKE_CASE_ = (8, 3, 6, 1, 10, 14, 13, 4, 7) SCREAMING_SNAKE_CASE_ = BinarySearchTree() for i in testlist: t.insert(__lowerCamelCase ) # Prints all the elements of the list in order traversal print(__lowerCamelCase ) if t.search(6 ) is not None: print('''The value 6 exists''' ) else: print('''The value 6 doesn\'t exist''' ) if t.search(-1 ) is not None: print('''The value -1 exists''' ) else: print('''The value -1 doesn\'t exist''' ) if not t.empty(): print('''Max Value: ''', t.get_max().value ) # type: ignore print('''Min Value: ''', t.get_min().value ) # type: ignore for i in testlist: t.remove(__lowerCamelCase ) print(__lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
299
def A__ ( __lowerCamelCase ): if not isinstance(__lowerCamelCase, __lowerCamelCase ): raise ValueError('''Input must be an integer''' ) if input_num <= 0: raise ValueError('''Input must be positive''' ) return sum( divisor for divisor in range(1, input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
299
1
'''simple docstring''' import doctest from collections import deque import numpy as np class lowerCAmelCase_: '''simple docstring''' def __init__( self ) -> None: lowerCAmelCase__ : str = [2, 1, 2, -1] lowerCAmelCase__ : List[str] = [1, 2, 3, 4] def UpperCAmelCase_ ( self ) -> list[float]: lowerCAmelCase__ : List[str] = len(self.first_signal ) lowerCAmelCase__ : Union[str, Any] = len(self.second_signal ) lowerCAmelCase__ : Optional[int] = max(__UpperCAmelCase ,__UpperCAmelCase ) # create a zero matrix of max_length x max_length lowerCAmelCase__ : str = [[0] * max_length for i in range(__UpperCAmelCase )] # fills the smaller signal with zeros to make both signals of same length if length_first_signal < length_second_signal: self.first_signal += [0] * (max_length - length_first_signal) elif length_first_signal > length_second_signal: self.second_signal += [0] * (max_length - length_second_signal) for i in range(__UpperCAmelCase ): lowerCAmelCase__ : Any = deque(self.second_signal ) rotated_signal.rotate(__UpperCAmelCase ) for j, item in enumerate(__UpperCAmelCase ): matrix[i][j] += item # multiply the matrix with the first signal lowerCAmelCase__ : List[str] = np.matmul(np.transpose(__UpperCAmelCase ) ,np.transpose(self.first_signal ) ) # rounding-off to two decimal places return [round(__UpperCAmelCase ,2 ) for i in final_signal] if __name__ == "__main__": doctest.testmod()
184
'''simple docstring''' import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def UpperCAmelCase_ ( self ) -> Dict: lowerCAmelCase__ : str = tempfile.mkdtemp() lowerCAmelCase__ : List[Any] = 8 # DPR tok lowerCAmelCase__ : int = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] lowerCAmelCase__ : List[Any] = os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) os.makedirs(__UpperCAmelCase ,exist_ok=__UpperCAmelCase ) lowerCAmelCase__ : Dict = os.path.join(__UpperCAmelCase ,DPR_VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) # BART tok lowerCAmelCase__ : str = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] lowerCAmelCase__ : List[Any] = dict(zip(__UpperCAmelCase ,range(len(__UpperCAmelCase ) ) ) ) lowerCAmelCase__ : Optional[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] lowerCAmelCase__ : Any = {"""unk_token""": """<unk>"""} lowerCAmelCase__ : str = os.path.join(self.tmpdirname ,"""bart_tokenizer""" ) os.makedirs(__UpperCAmelCase ,exist_ok=__UpperCAmelCase ) lowerCAmelCase__ : Any = os.path.join(__UpperCAmelCase ,BART_VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase__ : Dict = os.path.join(__UpperCAmelCase ,BART_VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(__UpperCAmelCase ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) ) def UpperCAmelCase_ ( self ) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""bart_tokenizer""" ) ) def UpperCAmelCase_ ( self ) -> Any: shutil.rmtree(self.tmpdirname ) @require_tokenizers def UpperCAmelCase_ ( self ) -> int: lowerCAmelCase__ : Any = os.path.join(self.tmpdirname ,"""rag_tokenizer""" ) lowerCAmelCase__ : Any = RagConfig(question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ) lowerCAmelCase__ : str = RagTokenizer(question_encoder=self.get_dpr_tokenizer() ,generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(__UpperCAmelCase ) rag_tokenizer.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Any = RagTokenizer.from_pretrained(__UpperCAmelCase ,config=__UpperCAmelCase ) self.assertIsInstance(new_rag_tokenizer.question_encoder ,__UpperCAmelCase ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() ,rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator ,__UpperCAmelCase ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() ,rag_tokenizer.generator.get_vocab() ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: lowerCAmelCase__ : List[str] = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" ) lowerCAmelCase__ : Optional[Any] = [ """who got the first nobel prize in physics""", """when is the next deadpool movie being released""", """which mode is used for short wave broadcast service""", """who is the owner of reading football club""", """when is the next scandal episode coming out""", """when is the last time the philadelphia won the superbowl""", """what is the most current adobe flash player version""", """how many episodes are there in dragon ball z""", """what is the first step in the evolution of the eye""", """where is gall bladder situated in human body""", """what is the main mineral in lithium batteries""", """who is the president of usa right now""", """where do the greasers live in the outsiders""", """panda is a national animal of which country""", """what is the name of manchester united stadium""", ] lowerCAmelCase__ : Dict = tokenizer(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: lowerCAmelCase__ : str = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" ) lowerCAmelCase__ : str = [ """who got the first nobel prize in physics""", """when is the next deadpool movie being released""", """which mode is used for short wave broadcast service""", """who is the owner of reading football club""", """when is the next scandal episode coming out""", """when is the last time the philadelphia won the superbowl""", """what is the most current adobe flash player version""", """how many episodes are there in dragon ball z""", """what is the first step in the evolution of the eye""", """where is gall bladder situated in human body""", """what is the main mineral in lithium batteries""", """who is the president of usa right now""", """where do the greasers live in the outsiders""", """panda is a national animal of which country""", """what is the name of manchester united stadium""", ] lowerCAmelCase__ : Tuple = tokenizer(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase )
184
1
from __future__ import annotations from typing import TypedDict class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = 42 a__ = 42 def a__ ( A_ ): '''simple docstring''' if not isinstance(A_, A_ ): raise TypeError("""The parameter s type must be str.""" ) return [s[i:] + s[:i] for i in range(len(A_ ) )] def a__ ( A_ ): '''simple docstring''' if not isinstance(A_, A_ ): raise TypeError("""The parameter s type must be str.""" ) if not s: raise ValueError("""The parameter s must not be empty.""" ) __magic_name__ = all_rotations(A_ ) rotations.sort() # sort the list of rotations in alphabetically order # make a string composed of the last char of each rotation __magic_name__ = { "bwt_string": "".join([word[-1] for word in rotations] ), "idx_original_string": rotations.index(A_ ), } return response def a__ ( A_, A_ ): '''simple docstring''' if not isinstance(A_, A_ ): raise TypeError("""The parameter bwt_string type must be str.""" ) if not bwt_string: raise ValueError("""The parameter bwt_string must not be empty.""" ) try: __magic_name__ = int(A_ ) except ValueError: raise TypeError( """The parameter idx_original_string type must be int or passive""" """ of cast to int.""" ) if idx_original_string < 0: raise ValueError("""The parameter idx_original_string must not be lower than 0.""" ) if idx_original_string >= len(A_ ): raise ValueError( """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" ) __magic_name__ = [""""""] * len(A_ ) for _ in range(len(A_ ) ): for i in range(len(A_ ) ): __magic_name__ = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": __lowerCAmelCase : Tuple = 'Provide a string that I will generate its BWT transform: ' __lowerCAmelCase : str = input(entry_msg).strip() __lowerCAmelCase : Dict = bwt_transform(s) print( F'''Burrows Wheeler transform for string \'{s}\' results ''' F'''in \'{result["bwt_string"]}\'''' ) __lowerCAmelCase : Optional[Any] = reverse_bwt(result['bwt_string'], result['idx_original_string']) print( F'''Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' ''' F'''we get original string \'{original_string}\'''' )
88
import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path __lowerCAmelCase : Any = [ {'dataset': 'wikipedia', 'config_name': '20220301.de'}, {'dataset': 'wikipedia', 'config_name': '20220301.en'}, {'dataset': 'wikipedia', 'config_name': '20220301.fr'}, {'dataset': 'wikipedia', 'config_name': '20220301.frr'}, {'dataset': 'wikipedia', 'config_name': '20220301.it'}, {'dataset': 'wikipedia', 'config_name': '20220301.simple'}, {'dataset': 'snli', 'config_name': 'plain_text'}, {'dataset': 'eli5', 'config_name': 'LFQA_reddit'}, {'dataset': 'wiki40b', 'config_name': 'en'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'}, {'dataset': 'natural_questions', 'config_name': 'default'}, ] def a__ ( A_=True ): '''simple docstring''' if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_A ) ) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = None a__ = None def _lowercase ( self : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] ) -> Tuple: """simple docstring""" with TemporaryDirectory() as tmp_dir: __magic_name__ = dataset_module_factory(UpperCamelCase__ , cache_dir=UpperCamelCase__ ) __magic_name__ = import_main_class(dataset_module.module_path , dataset=UpperCamelCase__ ) __magic_name__ = builder_cls( cache_dir=UpperCamelCase__ , config_name=UpperCamelCase__ , hash=dataset_module.hash , ) __magic_name__ = """/""".join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=UpperCamelCase__ ).replace(os.sep , """/""" ), config.DATASET_INFO_FILENAME, ] ) __magic_name__ = cached_path(UpperCamelCase__ , cache_dir=UpperCamelCase__ ) self.assertTrue(os.path.exists(UpperCamelCase__ ) ) @pytest.mark.integration def a__ ( A_ ): '''simple docstring''' __magic_name__ = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple""" __magic_name__ = dataset_module_factory("""wikipedia""", cache_dir=A_ ) __magic_name__ = import_main_class(dataset_module.module_path ) __magic_name__ = builder_cls( cache_dir=A_, config_name="""20220301.frr""", hash=dataset_module.hash, ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam __magic_name__ = None builder_instance.download_and_prepare() __magic_name__ = builder_instance.as_dataset() assert ds @pytest.mark.integration def a__ ( A_ ): '''simple docstring''' __magic_name__ = dataset_module_factory("""wikipedia""", cache_dir=A_ ) __magic_name__ = import_main_class(dataset_module.module_path, dataset=A_ ) __magic_name__ = builder_cls( cache_dir=A_, config_name="""20220301.frr""", hash=dataset_module.hash, ) __magic_name__ = builder_instance.as_streaming_dataset() assert ds assert isinstance(A_, A_ ) assert "train" in ds assert isinstance(ds["""train"""], A_ ) assert next(iter(ds["""train"""] ) )
88
1
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" _enforce_args(UpperCamelCase , UpperCamelCase ) if n == 0: return 0 lowerCAmelCase__ : List[str] = float("""-inf""" ) for i in range(1 , n + 1 ): lowerCAmelCase__ : Optional[int] = max( UpperCamelCase , prices[i - 1] + naive_cut_rod_recursive(n - i , UpperCamelCase ) ) return max_revue def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" _enforce_args(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : int = [float("""-inf""" ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: lowerCAmelCase__ : Optional[int] = float("""-inf""" ) for i in range(1 , n + 1 ): lowerCAmelCase__ : Dict = max( UpperCamelCase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , UpperCamelCase , UpperCamelCase ) , ) lowerCAmelCase__ : Union[str, Any] = max_revenue return max_rev[n] def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" _enforce_args(UpperCamelCase , UpperCamelCase ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. lowerCAmelCase__ : int = [float("""-inf""" ) for _ in range(n + 1 )] lowerCAmelCase__ : int = 0 for i in range(1 , n + 1 ): lowerCAmelCase__ : Optional[int] = max_rev[i] for j in range(1 , i + 1 ): lowerCAmelCase__ : Optional[int] = max(UpperCamelCase , prices[j - 1] + max_rev[i - j] ) lowerCAmelCase__ : Optional[Any] = max_revenue_i return max_rev[n] def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" if n < 0: lowerCAmelCase__ : List[Any] = f"""n must be greater than or equal to 0. Got n = {n}""" raise ValueError(UpperCamelCase ) if n > len(UpperCamelCase ): lowerCAmelCase__ : List[Any] = ( """Each integral piece of rod must have a corresponding price. """ f"""Got n = {n} but length of prices = {len(UpperCamelCase )}""" ) raise ValueError(UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" lowerCAmelCase__ : List[str] = [6, 10, 12, 15, 20, 23] lowerCAmelCase__ : Tuple = len(UpperCamelCase ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. lowerCAmelCase__ : int = 36 lowerCAmelCase__ : Tuple = top_down_cut_rod(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Tuple = bottom_up_cut_rod(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = naive_cut_rod_recursive(UpperCamelCase , UpperCamelCase ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
184
'''simple docstring''' import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def UpperCAmelCase_ ( self ) -> Dict: lowerCAmelCase__ : str = tempfile.mkdtemp() lowerCAmelCase__ : List[Any] = 8 # DPR tok lowerCAmelCase__ : int = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] lowerCAmelCase__ : List[Any] = os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) os.makedirs(__UpperCAmelCase ,exist_ok=__UpperCAmelCase ) lowerCAmelCase__ : Dict = os.path.join(__UpperCAmelCase ,DPR_VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) # BART tok lowerCAmelCase__ : str = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] lowerCAmelCase__ : List[Any] = dict(zip(__UpperCAmelCase ,range(len(__UpperCAmelCase ) ) ) ) lowerCAmelCase__ : Optional[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] lowerCAmelCase__ : Any = {"""unk_token""": """<unk>"""} lowerCAmelCase__ : str = os.path.join(self.tmpdirname ,"""bart_tokenizer""" ) os.makedirs(__UpperCAmelCase ,exist_ok=__UpperCAmelCase ) lowerCAmelCase__ : Any = os.path.join(__UpperCAmelCase ,BART_VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase__ : Dict = os.path.join(__UpperCAmelCase ,BART_VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(__UpperCAmelCase ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) ) def UpperCAmelCase_ ( self ) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""bart_tokenizer""" ) ) def UpperCAmelCase_ ( self ) -> Any: shutil.rmtree(self.tmpdirname ) @require_tokenizers def UpperCAmelCase_ ( self ) -> int: lowerCAmelCase__ : Any = os.path.join(self.tmpdirname ,"""rag_tokenizer""" ) lowerCAmelCase__ : Any = RagConfig(question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ) lowerCAmelCase__ : str = RagTokenizer(question_encoder=self.get_dpr_tokenizer() ,generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(__UpperCAmelCase ) rag_tokenizer.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Any = RagTokenizer.from_pretrained(__UpperCAmelCase ,config=__UpperCAmelCase ) self.assertIsInstance(new_rag_tokenizer.question_encoder ,__UpperCAmelCase ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() ,rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator ,__UpperCAmelCase ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() ,rag_tokenizer.generator.get_vocab() ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: lowerCAmelCase__ : List[str] = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" ) lowerCAmelCase__ : Optional[Any] = [ """who got the first nobel prize in physics""", """when is the next deadpool movie being released""", """which mode is used for short wave broadcast service""", """who is the owner of reading football club""", """when is the next scandal episode coming out""", """when is the last time the philadelphia won the superbowl""", """what is the most current adobe flash player version""", """how many episodes are there in dragon ball z""", """what is the first step in the evolution of the eye""", """where is gall bladder situated in human body""", """what is the main mineral in lithium batteries""", """who is the president of usa right now""", """where do the greasers live in the outsiders""", """panda is a national animal of which country""", """what is the name of manchester united stadium""", ] lowerCAmelCase__ : Dict = tokenizer(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: lowerCAmelCase__ : str = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" ) lowerCAmelCase__ : str = [ """who got the first nobel prize in physics""", """when is the next deadpool movie being released""", """which mode is used for short wave broadcast service""", """who is the owner of reading football club""", """when is the next scandal episode coming out""", """when is the last time the philadelphia won the superbowl""", """what is the most current adobe flash player version""", """how many episodes are there in dragon ball z""", """what is the first step in the evolution of the eye""", """where is gall bladder situated in human body""", """what is the main mineral in lithium batteries""", """who is the president of usa right now""", """where do the greasers live in the outsiders""", """panda is a national animal of which country""", """what is the name of manchester united stadium""", ] lowerCAmelCase__ : Tuple = tokenizer(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase )
184
1
"""simple docstring""" from __future__ import annotations def A__ ( UpperCamelCase ): # This function is recursive A = len(UpperCamelCase ) # If the array contains only one element, we return it (it's the stop condition of # recursion) if array_length <= 1: return array # Else A = array[0] A = False A = 1 A = [] while not is_found and i < array_length: if array[i] < pivot: A = True A = [element for element in array[i:] if element >= array[i]] A = longest_subsequence(UpperCamelCase ) if len(UpperCamelCase ) > len(UpperCamelCase ): A = temp_array else: i += 1 A = [element for element in array[1:] if element >= pivot] A = [pivot, *longest_subsequence(UpperCamelCase )] if len(UpperCamelCase ) > len(UpperCamelCase ): return temp_array else: return longest_subseq if __name__ == "__main__": import doctest doctest.testmod()
292
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case : Optional[int] = logging.get_logger(__name__) _snake_case : Optional[int] = { 'google/vivit-b-16x2-kinetics400': ( 'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = '''vivit''' def __init__( self :Optional[Any] , __UpperCamelCase :Dict=2_24 , __UpperCamelCase :int=32 , __UpperCamelCase :Union[str, Any]=[2, 16, 16] , __UpperCamelCase :Optional[Any]=3 , __UpperCamelCase :Optional[Any]=7_68 , __UpperCamelCase :Any=12 , __UpperCamelCase :List[str]=12 , __UpperCamelCase :List[str]=30_72 , __UpperCamelCase :Any="gelu_fast" , __UpperCamelCase :List[Any]=0.0 , __UpperCamelCase :str=0.0 , __UpperCamelCase :Dict=0.02 , __UpperCamelCase :Optional[Any]=1e-06 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ): A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = num_frames A = tubelet_size A = num_channels A = qkv_bias super().__init__(**__UpperCamelCase )
292
1
import pytest lowerCAmelCase__ : Optional[int] ='__dummy_dataset1__' lowerCAmelCase__ : str ='\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n' @pytest.fixture def a__ ( ): return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def a__ ( ): return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def a__ ( A__, A__, A__ ): SCREAMING_SNAKE_CASE_ : List[str] = dataset_loading_script_name SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / 'datasets' / script_name script_dir.mkdir(parents=A__ ) SCREAMING_SNAKE_CASE_ : List[str] = script_dir / F'''{script_name}.py''' with open(A__, 'w' ) as f: f.write(A__ ) return str(A__ )
359
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase__ : List[Any] =logging.get_logger(__name__) lowerCAmelCase__ : Tuple ={ 'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json', } class __lowercase (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" _UpperCAmelCase = """focalnet""" def __init__( self , lowerCAmelCase__=2_2_4 , lowerCAmelCase__=4 , lowerCAmelCase__=3 , lowerCAmelCase__=9_6 , lowerCAmelCase__=False , lowerCAmelCase__=[1_9_2, 3_8_4, 7_6_8, 7_6_8] , lowerCAmelCase__=[2, 2, 6, 2] , lowerCAmelCase__=[2, 2, 2, 2] , lowerCAmelCase__=[3, 3, 3, 3] , lowerCAmelCase__="gelu" , lowerCAmelCase__=4.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__=False , lowerCAmelCase__=1E-4 , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-5 , lowerCAmelCase__=3_2 , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ , ): """simple docstring""" super().__init__(**lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Dict = image_size SCREAMING_SNAKE_CASE_ : str = patch_size SCREAMING_SNAKE_CASE_ : List[Any] = num_channels SCREAMING_SNAKE_CASE_ : Union[str, Any] = embed_dim SCREAMING_SNAKE_CASE_ : Any = use_conv_embed SCREAMING_SNAKE_CASE_ : Dict = hidden_sizes SCREAMING_SNAKE_CASE_ : Any = depths SCREAMING_SNAKE_CASE_ : Optional[Any] = focal_levels SCREAMING_SNAKE_CASE_ : Any = focal_windows SCREAMING_SNAKE_CASE_ : Tuple = hidden_act SCREAMING_SNAKE_CASE_ : Dict = mlp_ratio SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : Tuple = drop_path_rate SCREAMING_SNAKE_CASE_ : List[Any] = use_layerscale SCREAMING_SNAKE_CASE_ : List[Any] = layerscale_value SCREAMING_SNAKE_CASE_ : List[str] = use_post_layernorm SCREAMING_SNAKE_CASE_ : Optional[int] = use_post_layernorm_in_modulation SCREAMING_SNAKE_CASE_ : str = normalize_modulator SCREAMING_SNAKE_CASE_ : List[str] = initializer_range SCREAMING_SNAKE_CASE_ : str = layer_norm_eps SCREAMING_SNAKE_CASE_ : Dict = encoder_stride SCREAMING_SNAKE_CASE_ : Dict = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = get_aligned_output_features_output_indices( out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names )
162
0
from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
99
def A_ ( A__ , A__ ) -> str: if a < 0 or b < 0: raise ValueError('the value of both inputs must be positive' ) a__ : List[str] = str(bin(A__ ) )[2:] # remove the leading "0b" a__ : Optional[int] = str(bin(A__ ) )[2:] # remove the leading "0b" a__ : List[str] = max(len(A__ ) , len(A__ ) ) return "0b" + "".join( str(int(char_a == '1' and char_b == '1' ) ) for char_a, char_b in zip(a_binary.zfill(A__ ) , b_binary.zfill(A__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
99
1
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowercase ( snake_case__ , unittest.TestCase): """simple docstring""" a__ : Dict = KandinskyImgaImgPipeline a__ : Union[str, Any] = ["prompt", "image_embeds", "negative_image_embeds", "image"] a__ : List[Any] = [ "prompt", "negative_prompt", "image_embeds", "negative_image_embeds", "image", ] a__ : Any = [ "generator", "height", "width", "strength", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] a__ : List[str] = False @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: return 32 @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: return 32 @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: return self.time_input_dim @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: return self.time_input_dim * 4 @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: return 100 @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: UpperCAmelCase_= XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" ) return tokenizer @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: torch.manual_seed(0 ) UpperCAmelCase_= MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , ) UpperCAmelCase_= MultilingualCLIP(__UpperCAmelCase ) UpperCAmelCase_= text_encoder.eval() return text_encoder @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: torch.manual_seed(0 ) UpperCAmelCase_= { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """text_image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """text_image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } UpperCAmelCase_= UNetaDConditionModel(**__UpperCAmelCase ) return model @property def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: torch.manual_seed(0 ) UpperCAmelCase_= VQModel(**self.dummy_movq_kwargs ) return model def _SCREAMING_SNAKE_CASE ( self : str ) -> Any: UpperCAmelCase_= self.dummy_text_encoder UpperCAmelCase_= self.dummy_tokenizer UpperCAmelCase_= self.dummy_unet UpperCAmelCase_= self.dummy_movq UpperCAmelCase_= { """num_train_timesteps""": 1_000, """beta_schedule""": """linear""", """beta_start""": 0.00_085, """beta_end""": 0.012, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } UpperCAmelCase_= DDIMScheduler(**__UpperCAmelCase ) UpperCAmelCase_= { """text_encoder""": text_encoder, """tokenizer""": tokenizer, """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def _SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any]=0 ) -> Union[str, Any]: UpperCAmelCase_= floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) UpperCAmelCase_= floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__UpperCAmelCase ) # create init_image UpperCAmelCase_= floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) UpperCAmelCase_= image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_= Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((256, 256) ) if str(__UpperCAmelCase ).startswith("""mps""" ): UpperCAmelCase_= torch.manual_seed(__UpperCAmelCase ) else: UpperCAmelCase_= torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) UpperCAmelCase_= { """prompt""": """horse""", """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: UpperCAmelCase_= """cpu""" UpperCAmelCase_= self.get_dummy_components() UpperCAmelCase_= self.pipeline_class(**__UpperCAmelCase ) UpperCAmelCase_= pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) UpperCAmelCase_= pipe(**self.get_dummy_inputs(__UpperCAmelCase ) ) UpperCAmelCase_= output.images UpperCAmelCase_= pipe( **self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0] UpperCAmelCase_= image[0, -3:, -3:, -1] UpperCAmelCase_= image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_= np.array( [0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class lowercase ( unittest.TestCase): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: UpperCAmelCase_= load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/kandinsky_img2img_frog.npy""" ) UpperCAmelCase_= load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) UpperCAmelCase_= """A red cartoon frog, 4k""" UpperCAmelCase_= KandinskyPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(__UpperCAmelCase ) UpperCAmelCase_= KandinskyImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa ) UpperCAmelCase_= pipeline.to(__UpperCAmelCase ) pipeline.set_progress_bar_config(disable=__UpperCAmelCase ) UpperCAmelCase_= torch.Generator(device="""cpu""" ).manual_seed(0 ) UpperCAmelCase_, UpperCAmelCase_= pipe_prior( __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() UpperCAmelCase_= pipeline( __UpperCAmelCase , image=__UpperCAmelCase , image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , ) UpperCAmelCase_= output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
277
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo __A = '''\ @misc{wu2016googles, title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } ''' __A = '''\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the \'GLEU score\'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score\'s range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. ''' __A = '''\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: \'google_bleu\': google_bleu score Examples: Example 1: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.44 Example 2: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.61 Example 3: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results["google_bleu"], 2)) 0.53 Example 4: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results["google_bleu"], 2)) 0.4 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class lowercase ( datasets.Metric): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , ) def _SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : List[List[List[str]]] , __UpperCAmelCase : List[List[str]] , __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=__UpperCAmelCase , hypotheses=__UpperCAmelCase , min_len=__UpperCAmelCase , max_len=__UpperCAmelCase ) }
277
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available A : List[Any] = { "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"], "configuration_maskformer_swin": ["MaskFormerSwinConfig"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[str] = ["MaskFormerFeatureExtractor"] A : Dict = ["MaskFormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = [ "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "MaskFormerForInstanceSegmentation", "MaskFormerModel", "MaskFormerPreTrainedModel", ] A : int = [ "MaskFormerSwinBackbone", "MaskFormerSwinModel", "MaskFormerSwinPreTrainedModel", ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys A : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure)
184
def lowercase_ ( _A : int , _A : int ): """simple docstring""" while a != 0: lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = b % a, a return b def lowercase_ ( _A : int , _A : int ): """simple docstring""" if gcd(_A , _A ) != 1: lowerCamelCase__ : List[str] = F"mod inverse of {a!r} and {m!r} does not exist" raise ValueError(_A ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = 1, 0, a lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = 0, 1, m while va != 0: lowerCamelCase__ : Tuple = ua // va lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
184
1
"""simple docstring""" import numpy as np def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' return 1 / (1 + np.exp(-vector )) def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' return vector * sigmoid(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
326
"""simple docstring""" from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class _a ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase): """simple docstring""" UpperCamelCase__ = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) UpperCamelCase__ = ( { """feature-extraction""": TFMobileBertModel, """fill-mask""": TFMobileBertForMaskedLM, """question-answering""": TFMobileBertForQuestionAnswering, """text-classification""": TFMobileBertForSequenceClassification, """token-classification""": TFMobileBertForTokenClassification, """zero-shot""": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False def lowercase__ ( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : str=False )->Optional[Any]: _UpperCAmelCase = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) if return_labels: if model_class in get_values(__UpperCamelCase ): _UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class _a ( lowerCAmelCase): """simple docstring""" def __init__( self : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Any=1_3 , __UpperCamelCase : Any=7 , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Dict=9_9 , __UpperCamelCase : Optional[int]=3_2 , __UpperCamelCase : Union[str, Any]=3_2 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Optional[Any]=3_7 , __UpperCamelCase : List[str]="gelu" , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Optional[Any]=5_1_2 , __UpperCamelCase : Any=1_6 , __UpperCamelCase : Dict=2 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : Tuple=4 , __UpperCamelCase : List[str]=None , )->Any: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope _UpperCAmelCase = embedding_size def lowercase__ ( self : Optional[int] )->int: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase__ ( self : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] )->List[Any]: _UpperCAmelCase = TFMobileBertModel(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) _UpperCAmelCase = [input_ids, input_mask] _UpperCAmelCase = model(__UpperCamelCase ) _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowercase__ ( self : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->Tuple: _UpperCAmelCase = TFMobileBertForMaskedLM(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : Any )->List[Any]: _UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowercase__ ( self : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Dict )->List[Any]: _UpperCAmelCase = TFMobileBertForPreTraining(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] )->Any: _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForSequenceClassification(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : Dict , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] )->List[str]: _UpperCAmelCase = self.num_choices _UpperCAmelCase = TFMobileBertForMultipleChoice(config=__UpperCamelCase ) _UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Any )->Dict: _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForTokenClassification(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->List[Any]: _UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : List[str] )->Optional[Any]: _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict def lowercase__ ( self : List[Any] )->str: _UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 ) def lowercase__ ( self : List[Any] )->List[str]: self.config_tester.run_common_tests() def lowercase__ ( self : Optional[Any] )->Union[str, Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__UpperCamelCase ) def lowercase__ ( self : Any )->Union[str, Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__UpperCamelCase ) def lowercase__ ( self : List[Any] )->Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__UpperCamelCase ) def lowercase__ ( self : str )->Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__UpperCamelCase ) def lowercase__ ( self : Any )->List[str]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__UpperCamelCase ) def lowercase__ ( self : Dict )->Any: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__UpperCamelCase ) def lowercase__ ( self : Any )->Optional[Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__UpperCamelCase ) def lowercase__ ( self : List[str] )->Tuple: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__UpperCamelCase ) @slow def lowercase__ ( self : Tuple )->List[str]: # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: _UpperCAmelCase = TFMobileBertModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) @require_tf class _a ( unittest.TestCase): """simple docstring""" @slow def lowercase__ ( self : str )->Dict: _UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' ) _UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) _UpperCAmelCase = model(__UpperCamelCase )[0] _UpperCAmelCase = [1, 6, 3_0_5_2_2] self.assertEqual(output.shape , __UpperCamelCase ) _UpperCAmelCase = tf.constant( [ [ [-4.5_9_1_9_5_4_7, -9.2_4_8_2_9_5, -9.6_4_5_2_5_6], [-6.7_3_0_6_1_7_5, -6.4_4_0_2_8_4, -6.6_0_5_2_8_3_7], [-7.2_7_4_3_5_0_6, -6.7_8_4_7_9_1_5, -6.0_2_4_6_7_3], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 )
326
1
from ...configuration_utils import PretrainedConfig from ...utils import logging A : Optional[int] = logging.get_logger(__name__) A : Any = { "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json", "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json", } class _lowercase ( lowercase__): """simple docstring""" A__ = "luke" def __init__( self : Tuple , __lowerCamelCase : Any=50267 , __lowerCamelCase : Any=500000 , __lowerCamelCase : str=768 , __lowerCamelCase : int=256 , __lowerCamelCase : str=12 , __lowerCamelCase : int=12 , __lowerCamelCase : Optional[int]=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Dict=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Optional[int]=0.0_2 , __lowerCamelCase : Union[str, Any]=1E-1_2 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[str]=None , __lowerCamelCase : Dict=1 , __lowerCamelCase : List[str]=0 , __lowerCamelCase : Tuple=2 , **__lowerCamelCase : List[str] , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : Tuple = vocab_size lowerCamelCase__ : Any = entity_vocab_size lowerCamelCase__ : Dict = hidden_size lowerCamelCase__ : Union[str, Any] = entity_emb_size lowerCamelCase__ : List[Any] = num_hidden_layers lowerCamelCase__ : str = num_attention_heads lowerCamelCase__ : List[str] = hidden_act lowerCamelCase__ : List[str] = intermediate_size lowerCamelCase__ : Optional[Any] = hidden_dropout_prob lowerCamelCase__ : str = attention_probs_dropout_prob lowerCamelCase__ : Any = max_position_embeddings lowerCamelCase__ : Dict = type_vocab_size lowerCamelCase__ : Union[str, Any] = initializer_range lowerCamelCase__ : Any = layer_norm_eps lowerCamelCase__ : int = use_entity_aware_attention lowerCamelCase__ : Any = classifier_dropout
184
def lowercase_ ( _A : int , _A : int ): """simple docstring""" while a != 0: lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = b % a, a return b def lowercase_ ( _A : int , _A : int ): """simple docstring""" if gcd(_A , _A ) != 1: lowerCamelCase__ : List[str] = F"mod inverse of {a!r} and {m!r} does not exist" raise ValueError(_A ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = 1, 0, a lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = 0, 1, m while va != 0: lowerCamelCase__ : Tuple = ua // va lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
184
1
'''simple docstring''' import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @parameterized.expand([(None,), ('''foo.json''',)] ) def _a ( self : Tuple , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : Union[str, Any] = GenerationConfig( do_sample=_lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_lowerCamelCase , config_name=_lowerCamelCase ) A_ : Optional[Any] = GenerationConfig.from_pretrained(_lowerCamelCase , config_name=_lowerCamelCase ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , _lowerCamelCase ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50 ) self.assertEqual(loaded_config.max_length , 20 ) self.assertEqual(loaded_config.max_time , _lowerCamelCase ) def _a ( self : List[str] ): """simple docstring""" A_ : Optional[Any] = AutoConfig.from_pretrained('''gpt2''' ) A_ : Optional[Any] = GenerationConfig.from_model_config(_lowerCamelCase ) A_ : Optional[Any] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(_lowerCamelCase , _lowerCamelCase ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Tuple = GenerationConfig() A_ : Optional[Any] = { '''max_new_tokens''': 1024, '''foo''': '''bar''', } A_ : List[Any] = copy.deepcopy(_lowerCamelCase ) A_ : int = generation_config.update(**_lowerCamelCase ) # update_kwargs was not modified (no side effects) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1024 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(_lowerCamelCase , {'''foo''': '''bar'''} ) def _a ( self : Dict ): """simple docstring""" A_ : Tuple = GenerationConfig() A_ : Any = '''bar''' with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir: generation_config.save_pretrained(_lowerCamelCase ) A_ : Tuple = GenerationConfig.from_pretrained(_lowerCamelCase ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , '''bar''' ) A_ : int = GenerationConfig.from_model_config(_lowerCamelCase ) assert not hasattr(_lowerCamelCase , '''foo''' ) # no new kwargs should be initialized if from config def _a ( self : Tuple ): """simple docstring""" A_ : Optional[int] = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , _lowerCamelCase ) self.assertEqual(default_config.num_beams , 1 ) A_ : Dict = GenerationConfig( do_sample=_lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , _lowerCamelCase ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_lowerCamelCase ) A_ : int = GenerationConfig.from_pretrained(_lowerCamelCase , temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , _lowerCamelCase ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @classmethod def _a ( cls : List[str] ): """simple docstring""" A_ : Optional[int] = TOKEN HfFolder.save_token(_lowerCamelCase ) @classmethod def _a ( cls : Optional[Any] ): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-generation-config''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' ) except HTTPError: pass def _a ( self : Any ): """simple docstring""" A_ : List[str] = GenerationConfig( do_sample=_lowerCamelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''test-generation-config''' , use_auth_token=self._token ) A_ : List[Any] = GenerationConfig.from_pretrained(f'{USER}/test-generation-config' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-generation-config''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( _lowerCamelCase , repo_id='''test-generation-config''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : Any = GenerationConfig.from_pretrained(f'{USER}/test-generation-config' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Optional[Any] = GenerationConfig( do_sample=_lowerCamelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token ) A_ : Dict = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( _lowerCamelCase , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : Union[str, Any] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
4
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 snake_case__ = get_tests_dir("""fixtures""") class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] ): """simple docstring""" A_ : List[Any] = mock.Mock() A_ : List[str] = 500 A_ : Tuple = {} A_ : int = HTTPError A_ : Optional[Any] = {} # Download this model to make sure it's in the cache. A_ : Tuple = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=_lowerCamelCase ) as mock_head: A_ : List[Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # This check we did call the fake head request mock_head.assert_called() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = ViTImageProcessor.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' ) def _a ( self : Dict ): """simple docstring""" with self.assertRaises(_lowerCamelCase ): # config is in subfolder, the following should not work without specifying the subfolder A_ : Any = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' ) A_ : Tuple = AutoImageProcessor.from_pretrained( '''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' ) self.assertIsNotNone(_lowerCamelCase ) @is_staging_test class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @classmethod def _a ( cls : Tuple ): """simple docstring""" A_ : int = TOKEN HfFolder.save_token(_lowerCamelCase ) @classmethod def _a ( cls : str ): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-image-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' ) except HTTPError: pass def _a ( self : List[Any] ): """simple docstring""" A_ : Dict = ViTImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token ) A_ : Optional[int] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''test-image-processor''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : List[Any] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : int = ViTImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token ) A_ : List[str] = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" CustomImageProcessor.register_for_auto_class() A_ : Any = CustomImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , ) A_ : str = AutoImageProcessor.from_pretrained( f'{USER}/test-dynamic-image-processor' , trust_remote_code=_lowerCamelCase ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
4
1
def __lowercase ( _UpperCamelCase ) ->bool: """simple docstring""" lowercase : Optional[int] = n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
337
'''simple docstring''' import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py __lowerCamelCase = '''src/transformers''' __lowerCamelCase = '''docs/source/en/tasks''' def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[str]: with open(UpperCAmelCase__, """r""", encoding="""utf-8""", newline="""\n""" ) as f: A_ = f.readlines() # Find the start prompt. A_ = 0 while not lines[start_index].startswith(UpperCAmelCase__ ): start_index += 1 start_index += 1 A_ = start_index while not lines[end_index].startswith(UpperCAmelCase__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. __lowerCamelCase = direct_transformers_import(TRANSFORMERS_PATH) __lowerCamelCase = { '''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, '''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, '''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, '''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, '''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, '''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, '''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, '''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, '''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, '''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, '''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, '''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, '''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, '''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). __lowerCamelCase = { '''summarization.md''': ('''nllb''',), '''translation.md''': ('''nllb''',), } def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict: A_ = TASK_GUIDE_TO_MODELS[task_guide] A_ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(UpperCAmelCase__, set() ) A_ = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([F'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n" def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__=False ) -> Optional[Any]: A_ , A_ , A_ , A_ = _find_text_in_file( filename=os.path.join(UpperCAmelCase__, UpperCAmelCase__ ), start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""", end_prompt="""<!--End of the generated tip-->""", ) A_ = get_model_list_for_task(UpperCAmelCase__ ) if current_list != new_list: if overwrite: with open(os.path.join(UpperCAmelCase__, UpperCAmelCase__ ), """w""", encoding="""utf-8""", newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( F'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`''' """ to fix this.""" ) if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') __lowerCamelCase = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
162
0
import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": UpperCamelCase = pd.read_csv('''sample_data.csv''', header=None) UpperCamelCase = df.shape[:1][0] # If you're using some other dataset input the target column UpperCamelCase = df.iloc[:, 1:2] UpperCamelCase = actual_data.values.reshape(len_data, 1) UpperCamelCase = MinMaxScaler().fit_transform(actual_data) UpperCamelCase = 10 UpperCamelCase = 5 UpperCamelCase = 20 UpperCamelCase = len_data - periods * look_back UpperCamelCase = actual_data[:division] UpperCamelCase = actual_data[division - look_back :] UpperCamelCase = [], [] UpperCamelCase = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) UpperCamelCase = np.array(train_x) UpperCamelCase = np.array(test_x) UpperCamelCase = np.array([list(i.ravel()) for i in train_y]) UpperCamelCase = np.array([list(i.ravel()) for i in test_y]) UpperCamelCase = Sequential() model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(128, 1))) model.add(Dense(forward_days)) model.compile(loss='''mean_squared_error''', optimizer='''adam''') UpperCamelCase = model.fit( x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4 ) UpperCamelCase = model.predict(x_test)
364
def lowercase_ ( _lowerCamelCase : int = 1 , _lowerCamelCase : int = 1000): lowercase__ : Union[str, Any] = 1 lowercase__ : int = 0 for divide_by_number in range(_lowerCamelCase , digit + 1): lowercase__ : list[int] = [] lowercase__ : Dict = numerator for _ in range(1 , digit + 1): if now_divide in has_been_divided: if longest_list_length < len(_lowerCamelCase): lowercase__ : Union[str, Any] = len(_lowerCamelCase) lowercase__ : Optional[int] = divide_by_number else: has_been_divided.append(_lowerCamelCase) lowercase__ : Optional[Any] = now_divide * 10 % divide_by_number return the_digit # Tests if __name__ == "__main__": import doctest doctest.testmod()
333
0
from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class snake_case__ ( lowerCAmelCase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = ["""image_processor""", """tokenizer"""] _SCREAMING_SNAKE_CASE = """BlipImageProcessor""" _SCREAMING_SNAKE_CASE = """AutoTokenizer""" def __init__( self : List[str], _snake_case : Union[str, Any], _snake_case : Any ) ->Optional[int]: snake_case__ : List[str] = False super().__init__(_snake_case, _snake_case ) snake_case__ : int = self.image_processor def __call__( self : List[str], _snake_case : ImageInput = None, _snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, _snake_case : bool = True, _snake_case : Union[bool, str, PaddingStrategy] = False, _snake_case : Union[bool, str, TruncationStrategy] = None, _snake_case : Optional[int] = None, _snake_case : int = 0, _snake_case : Optional[int] = None, _snake_case : Optional[bool] = None, _snake_case : bool = False, _snake_case : bool = False, _snake_case : bool = False, _snake_case : bool = False, _snake_case : bool = False, _snake_case : bool = True, _snake_case : Optional[Union[str, TensorType]] = None, **_snake_case : Optional[int], ) ->BatchEncoding: if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None: snake_case__ : List[Any] = self.tokenizer snake_case__ : Dict = self.tokenizer( text=_snake_case, add_special_tokens=_snake_case, padding=_snake_case, truncation=_snake_case, max_length=_snake_case, stride=_snake_case, pad_to_multiple_of=_snake_case, return_attention_mask=_snake_case, return_overflowing_tokens=_snake_case, return_special_tokens_mask=_snake_case, return_offsets_mapping=_snake_case, return_token_type_ids=_snake_case, return_length=_snake_case, verbose=_snake_case, return_tensors=_snake_case, **_snake_case, ) return text_encoding # add pixel_values snake_case__ : List[str] = self.image_processor(_snake_case, return_tensors=_snake_case ) if text is not None: snake_case__ : Dict = self.tokenizer( text=_snake_case, add_special_tokens=_snake_case, padding=_snake_case, truncation=_snake_case, max_length=_snake_case, stride=_snake_case, pad_to_multiple_of=_snake_case, return_attention_mask=_snake_case, return_overflowing_tokens=_snake_case, return_special_tokens_mask=_snake_case, return_offsets_mapping=_snake_case, return_token_type_ids=_snake_case, return_length=_snake_case, verbose=_snake_case, return_tensors=_snake_case, **_snake_case, ) else: snake_case__ : Dict = None if text_encoding is not None: encoding_image_processor.update(_snake_case ) return encoding_image_processor def lowercase_ ( self : str, *_snake_case : int, **_snake_case : Optional[int] ) ->int: return self.tokenizer.batch_decode(*_snake_case, **_snake_case ) def lowercase_ ( self : str, *_snake_case : Optional[int], **_snake_case : List[str] ) ->Dict: return self.tokenizer.decode(*_snake_case, **_snake_case ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def lowercase_ ( self : Tuple ) ->List[str]: snake_case__ : Tuple = self.tokenizer.model_input_names snake_case__ : Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
277
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType a_ :Tuple = logging.get_logger(__name__) a_ :Union[str, Any] = { "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json", "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json", "microsoft/deberta-v2-xlarge-mnli": ( "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json" ), "microsoft/deberta-v2-xxlarge-mnli": ( "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json" ), } class snake_case__ ( lowerCAmelCase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = """deberta-v2""" def __init__( self : Union[str, Any], _snake_case : Dict=1_2_8_1_0_0, _snake_case : Any=1_5_3_6, _snake_case : Tuple=2_4, _snake_case : int=2_4, _snake_case : Optional[int]=6_1_4_4, _snake_case : Optional[int]="gelu", _snake_case : Optional[int]=0.1, _snake_case : List[str]=0.1, _snake_case : str=5_1_2, _snake_case : Optional[int]=0, _snake_case : Optional[int]=0.0_2, _snake_case : Dict=1e-7, _snake_case : int=False, _snake_case : Any=-1, _snake_case : List[str]=0, _snake_case : Tuple=True, _snake_case : Any=None, _snake_case : Union[str, Any]=0, _snake_case : Tuple="gelu", **_snake_case : Union[str, Any], ) ->Optional[int]: super().__init__(**_snake_case ) snake_case__ : Dict = hidden_size snake_case__ : Optional[int] = num_hidden_layers snake_case__ : Any = num_attention_heads snake_case__ : List[Any] = intermediate_size snake_case__ : List[Any] = hidden_act snake_case__ : Union[str, Any] = hidden_dropout_prob snake_case__ : Dict = attention_probs_dropout_prob snake_case__ : List[str] = max_position_embeddings snake_case__ : List[str] = type_vocab_size snake_case__ : Optional[Any] = initializer_range snake_case__ : Optional[int] = relative_attention snake_case__ : Tuple = max_relative_positions snake_case__ : Union[str, Any] = pad_token_id snake_case__ : Optional[int] = position_biased_input # Backwards compatibility if type(_snake_case ) == str: snake_case__ : int = [x.strip() for x in pos_att_type.lower().split('|' )] snake_case__ : List[str] = pos_att_type snake_case__ : Union[str, Any] = vocab_size snake_case__ : Optional[int] = layer_norm_eps snake_case__ : Optional[int] = kwargs.get('pooler_hidden_size', _snake_case ) snake_case__ : int = pooler_dropout snake_case__ : str = pooler_hidden_act class snake_case__ ( lowerCAmelCase_ ): """simple docstring""" @property def lowercase_ ( self : Optional[int] ) ->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case__ : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: snake_case__ : int = {0: 'batch', 1: 'sequence'} if self._config.type_vocab_size > 0: return OrderedDict( [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] ) else: return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] ) @property def lowercase_ ( self : Dict ) ->int: return 1_2 def lowercase_ ( self : Tuple, _snake_case : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], _snake_case : int = -1, _snake_case : int = -1, _snake_case : int = -1, _snake_case : bool = False, _snake_case : Optional["TensorType"] = None, _snake_case : int = 3, _snake_case : int = 4_0, _snake_case : int = 4_0, _snake_case : "PreTrainedTokenizerBase" = None, ) ->Mapping[str, Any]: snake_case__ : Union[str, Any] = super().generate_dummy_inputs(preprocessor=_snake_case, framework=_snake_case ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
277
1
"""simple docstring""" import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class __lowerCamelCase : '''simple docstring''' def __init__( self : Dict , a_ : List[str] , a_ : int=13 , a_ : int=7 , a_ : Optional[Any]=True , a_ : Optional[int]=True , a_ : Dict=True , a_ : Dict=True , a_ : List[Any]=True , a_ : Dict=False , a_ : Dict=False , a_ : Any=False , a_ : str=2 , a_ : Any=99 , a_ : Tuple=0 , a_ : int=32 , a_ : Optional[int]=5 , a_ : List[Any]=4 , a_ : List[str]=0.1 , a_ : Optional[Any]=0.1 , a_ : Tuple=5_12 , a_ : Any=2 , a_ : List[Any]=0.02 , a_ : Optional[Any]=2 , a_ : Tuple=4 , a_ : Optional[int]="last" , a_ : Dict=True , a_ : List[str]=None , a_ : Dict=0 , ): lowerCAmelCase_ : Tuple = parent lowerCAmelCase_ : Optional[int] = batch_size lowerCAmelCase_ : Union[str, Any] = seq_length lowerCAmelCase_ : Any = is_training lowerCAmelCase_ : List[str] = use_input_lengths lowerCAmelCase_ : List[Any] = use_token_type_ids lowerCAmelCase_ : List[str] = use_labels lowerCAmelCase_ : Optional[int] = gelu_activation lowerCAmelCase_ : List[Any] = sinusoidal_embeddings lowerCAmelCase_ : Optional[int] = causal lowerCAmelCase_ : List[str] = asm lowerCAmelCase_ : Tuple = n_langs lowerCAmelCase_ : Tuple = vocab_size lowerCAmelCase_ : Optional[Any] = n_special lowerCAmelCase_ : List[str] = hidden_size lowerCAmelCase_ : str = num_hidden_layers lowerCAmelCase_ : Union[str, Any] = num_attention_heads lowerCAmelCase_ : List[str] = hidden_dropout_prob lowerCAmelCase_ : Any = attention_probs_dropout_prob lowerCAmelCase_ : int = max_position_embeddings lowerCAmelCase_ : int = type_sequence_label_size lowerCAmelCase_ : Dict = initializer_range lowerCAmelCase_ : Dict = num_labels lowerCAmelCase_ : int = num_choices lowerCAmelCase_ : Optional[Any] = summary_type lowerCAmelCase_ : int = use_proj lowerCAmelCase_ : Dict = scope lowerCAmelCase_ : str = bos_token_id def lowerCamelCase ( self : List[str] ): lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase_ : Union[str, Any] = None if self.use_input_lengths: lowerCAmelCase_ : Optional[int] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowerCAmelCase_ : int = None if self.use_token_type_ids: lowerCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowerCAmelCase_ : Union[str, Any] = None lowerCAmelCase_ : Union[str, Any] = None lowerCAmelCase_ : Any = None if self.use_labels: lowerCAmelCase_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size] , 2 ).float() lowerCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase_ : List[str] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowerCamelCase ( self : Union[str, Any] ): return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def lowerCamelCase ( self : str , a_ : Any , a_ : str , a_ : Tuple , a_ : Optional[int] , a_ : int , a_ : Tuple , a_ : Tuple , a_ : Any , a_ : Any , ): lowerCAmelCase_ : str = XLMModel(config=a_ ) model.to(a_ ) model.eval() lowerCAmelCase_ : Tuple = model(a_ , lengths=a_ , langs=a_ ) lowerCAmelCase_ : Optional[Any] = model(a_ , langs=a_ ) lowerCAmelCase_ : Optional[int] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self : Optional[Any] , a_ : Tuple , a_ : str , a_ : List[str] , a_ : str , a_ : Any , a_ : Union[str, Any] , a_ : str , a_ : Dict , a_ : List[str] , ): lowerCAmelCase_ : Dict = XLMWithLMHeadModel(a_ ) model.to(a_ ) model.eval() lowerCAmelCase_ : int = model(a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase ( self : Any , a_ : Optional[Any] , a_ : Any , a_ : List[Any] , a_ : Dict , a_ : List[Any] , a_ : Dict , a_ : str , a_ : str , a_ : Union[str, Any] , ): lowerCAmelCase_ : int = XLMForQuestionAnsweringSimple(a_ ) model.to(a_ ) model.eval() lowerCAmelCase_ : Dict = model(a_ ) lowerCAmelCase_ : List[str] = model(a_ , start_positions=a_ , end_positions=a_ ) lowerCAmelCase_ : List[Any] = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase ( self : Dict , a_ : Dict , a_ : List[Any] , a_ : List[str] , a_ : int , a_ : Dict , a_ : Any , a_ : Dict , a_ : Optional[Any] , a_ : int , ): lowerCAmelCase_ : List[Any] = XLMForQuestionAnswering(a_ ) model.to(a_ ) model.eval() lowerCAmelCase_ : str = model(a_ ) lowerCAmelCase_ : int = model( a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , p_mask=a_ , ) lowerCAmelCase_ : List[Any] = model( a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , ) ((lowerCAmelCase_) , ) : Union[str, Any] = result_with_labels.to_tuple() lowerCAmelCase_ : Tuple = model(a_ , start_positions=a_ , end_positions=a_ ) ((lowerCAmelCase_) , ) : Any = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def lowerCamelCase ( self : Optional[int] , a_ : Any , a_ : Any , a_ : int , a_ : Tuple , a_ : Tuple , a_ : List[Any] , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : str , ): lowerCAmelCase_ : Dict = XLMForSequenceClassification(a_ ) model.to(a_ ) model.eval() lowerCAmelCase_ : List[str] = model(a_ ) lowerCAmelCase_ : Any = model(a_ , labels=a_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase ( self : Union[str, Any] , a_ : Optional[int] , a_ : List[Any] , a_ : Optional[int] , a_ : List[Any] , a_ : List[str] , a_ : int , a_ : List[Any] , a_ : int , a_ : Tuple , ): lowerCAmelCase_ : List[str] = self.num_labels lowerCAmelCase_ : str = XLMForTokenClassification(a_ ) model.to(a_ ) model.eval() lowerCAmelCase_ : Tuple = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase ( self : List[Any] , a_ : Union[str, Any] , a_ : Tuple , a_ : Any , a_ : Tuple , a_ : Tuple , a_ : Optional[Any] , a_ : Dict , a_ : int , a_ : int , ): lowerCAmelCase_ : Dict = self.num_choices lowerCAmelCase_ : Optional[Any] = XLMForMultipleChoice(config=a_ ) model.to(a_ ) model.eval() lowerCAmelCase_ : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase_ : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase_ : Dict = model( a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase ( self : str ): lowerCAmelCase_ : Tuple = self.prepare_config_and_inputs() ( ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ) : Union[str, Any] = config_and_inputs lowerCAmelCase_ : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths} return config, inputs_dict @require_torch class __lowerCamelCase ( A__ , A__ , A__ , unittest.TestCase ): '''simple docstring''' a_ : Optional[Any] = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) a_ : List[str] = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable a_ : Union[str, Any] = ( { """feature-extraction""": XLMModel, """fill-mask""": XLMWithLMHeadModel, """question-answering""": XLMForQuestionAnsweringSimple, """text-classification""": XLMForSequenceClassification, """text-generation""": XLMWithLMHeadModel, """token-classification""": XLMForTokenClassification, """zero-shot""": XLMForSequenceClassification, } if is_torch_available() else {} ) def lowerCamelCase ( self : Dict , a_ : Dict , a_ : Dict , a_ : List[str] , a_ : List[Any] , a_ : Optional[int] ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowerCamelCase ( self : Any , a_ : Union[str, Any] , a_ : Tuple , a_ : str=False ): lowerCAmelCase_ : Union[str, Any] = super()._prepare_for_class(a_ , a_ , return_labels=a_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": lowerCAmelCase_ : Tuple = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a_ ) lowerCAmelCase_ : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a_ ) return inputs_dict def lowerCamelCase ( self : Any ): lowerCAmelCase_ : Union[str, Any] = XLMModelTester(self ) lowerCAmelCase_ : Any = ConfigTester(self , config_class=a_ , emb_dim=37 ) def lowerCamelCase ( self : Optional[int] ): self.config_tester.run_common_tests() def lowerCamelCase ( self : int ): lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*a_ ) def lowerCamelCase ( self : Dict ): lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*a_ ) def lowerCamelCase ( self : int ): lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*a_ ) def lowerCamelCase ( self : Any ): lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*a_ ) def lowerCamelCase ( self : str ): lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*a_ ) def lowerCamelCase ( self : Optional[int] ): lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*a_ ) def lowerCamelCase ( self : Union[str, Any] ): lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*a_ ) def lowerCamelCase ( self : Any , a_ : Any , a_ : List[str] , a_ : Tuple , a_ : Optional[Any] , a_ : List[str] , a_ : Dict=False , a_ : str=1 ): self.assertIsInstance(a_ , a_ ) self.assertListEqual( [isinstance(a_ , a_ ) for iter_attentions in attentions] , [True] * len(a_ ) ) self.assertEqual(len(a_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(a_ ): # adds PAD dummy token lowerCAmelCase_ : List[Any] = min_length + idx + 1 lowerCAmelCase_ : str = min_length + idx + 1 lowerCAmelCase_ : Tuple = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(a_ ) ) def lowerCamelCase ( self : Dict , a_ : Optional[Any] , a_ : Any , a_ : str , a_ : str , a_ : Union[str, Any] , a_ : str=False , a_ : List[Any]=1 ): self.assertIsInstance(a_ , a_ ) self.assertListEqual( [isinstance(a_ , a_ ) for iter_hidden_states in hidden_states] , [True] * len(a_ ) , ) self.assertEqual(len(a_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(a_ ): # adds PAD dummy token lowerCAmelCase_ : List[Any] = min_length + idx + 1 lowerCAmelCase_ : str = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(a_ ) , ) pass @slow def lowerCamelCase ( self : Optional[Any] ): for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ : Any = XLMModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @require_torch class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCamelCase ( self : int ): lowerCAmelCase_ : Tuple = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" ) model.to(a_ ) lowerCAmelCase_ : str = torch.tensor([[14, 4_47]] , dtype=torch.long , device=a_ ) # the president lowerCAmelCase_ : Union[str, Any] = [ 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference lowerCAmelCase_ : Optional[int] = model.generate(a_ , do_sample=a_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , a_ )
161
"""simple docstring""" def __lowerCamelCase ( __UpperCamelCase = 50 ) -> int: """simple docstring""" lowerCAmelCase_ : int = [1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(F"""{solution() = }""")
161
1
import numpy as np def lowerCAmelCase__( lowercase : np.ndarray ) -> np.ndarray: return 1 / (1 + np.exp(-vector )) def lowerCAmelCase__( lowercase : np.ndarray ) -> np.ndarray: return vector * sigmoid(lowercase ) if __name__ == "__main__": import doctest doctest.testmod()
326
import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name class _lowerCamelCase ( a ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=768 ) -> List[str]: '''simple docstring''' super().__init__(UpperCAmelCase ) __snake_case : Optional[int] = proj_size __snake_case : str = CLIPVisionModel(UpperCAmelCase ) __snake_case : Tuple = PaintByExampleMapper(UpperCAmelCase ) __snake_case : Union[str, Any] = nn.LayerNorm(config.hidden_size ) __snake_case : Optional[Any] = nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling __snake_case : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=False ) -> List[str]: '''simple docstring''' __snake_case : int = self.model(pixel_values=UpperCAmelCase ) __snake_case : Optional[int] = clip_output.pooler_output __snake_case : Any = self.mapper(latent_states[:, None] ) __snake_case : Any = self.final_layer_norm(UpperCAmelCase ) __snake_case : str = self.proj_out(UpperCAmelCase ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class _lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self , UpperCAmelCase ) -> List[Any]: '''simple docstring''' super().__init__() __snake_case : List[Any] = (config.num_hidden_layers + 1) // 5 __snake_case : Dict = config.hidden_size __snake_case : str = 1 __snake_case : List[Any] = nn.ModuleList( [ BasicTransformerBlock(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , activation_fn="gelu" , attention_bias=UpperCAmelCase ) for _ in range(UpperCAmelCase ) ] ) def UpperCAmelCase ( self , UpperCAmelCase ) -> str: '''simple docstring''' for block in self.blocks: __snake_case : int = block(UpperCAmelCase ) return hidden_states
326
1
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json', 'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json', 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json' ), } class _lowerCAmelCase ( __snake_case ): '''simple docstring''' lowerCAmelCase_ = "longformer" def __init__(self , UpperCAmelCase = 512 , UpperCAmelCase = 2 , UpperCAmelCase = 1 , UpperCAmelCase = 0 , UpperCAmelCase = 2 , UpperCAmelCase = 30522 , UpperCAmelCase = 768 , UpperCAmelCase = 12 , UpperCAmelCase = 12 , UpperCAmelCase = 3072 , UpperCAmelCase = "gelu" , UpperCAmelCase = 0.1 , UpperCAmelCase = 0.1 , UpperCAmelCase = 512 , UpperCAmelCase = 2 , UpperCAmelCase = 0.02 , UpperCAmelCase = 1e-1_2 , UpperCAmelCase = False , **UpperCAmelCase , ) -> Union[str, Any]: super().__init__(pad_token_id=UpperCAmelCase , **UpperCAmelCase ) _snake_case = attention_window _snake_case = sep_token_id _snake_case = bos_token_id _snake_case = eos_token_id _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = hidden_act _snake_case = intermediate_size _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = type_vocab_size _snake_case = initializer_range _snake_case = layer_norm_eps _snake_case = onnx_export class _lowerCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , UpperCAmelCase , UpperCAmelCase = "default" , UpperCAmelCase = None ) -> Union[str, Any]: super().__init__(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) _snake_case = True @property def lowercase (self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _snake_case = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _snake_case = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""global_attention_mask""", dynamic_axis), ] ) @property def lowercase (self ) -> Mapping[str, Mapping[int, str]]: _snake_case = super().outputs if self.task == "default": _snake_case = {0: """batch"""} return outputs @property def lowercase (self ) -> float: return 1e-4 @property def lowercase (self ) -> int: # needs to be >= 14 to support tril operator return max(super().default_onnx_opset , 14 ) def lowercase (self , UpperCAmelCase , UpperCAmelCase = -1 , UpperCAmelCase = -1 , UpperCAmelCase = False , UpperCAmelCase = None , ) -> Mapping[str, Any]: _snake_case = super().generate_dummy_inputs( preprocessor=UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase ) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes the export fail randomly _snake_case = torch.zeros_like(inputs["""input_ids"""] ) # make every second token global _snake_case = 1 return inputs
358
'''simple docstring''' import qiskit def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = qiskit.Aer.get_backend("""aer_simulator""" ) # Create a Quantum Circuit acting on the q register _snake_case = qiskit.QuantumCircuit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Apply X (NOT) Gate to Qubits 0 & 1 circuit.x(0 ) circuit.x(1 ) # Map the quantum measurement to the classical bits circuit.measure([0, 1] , [0, 1] ) # Execute the circuit on the qasm simulator _snake_case = qiskit.execute(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , shots=1000 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __lowerCAmelCase = single_qubit_measure(2, 2) print(f'''Total count for various states are: {counts}''')
270
0
'''simple docstring''' import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class UpperCAmelCase_ ( unittest.TestCase ): @parameterized.expand([(None,), ('foo.json',)] ) def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : Optional[Any] ) -> Tuple: lowerCAmelCase = GenerationConfig( do_sample=UpperCAmelCase__ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(UpperCAmelCase__ , config_name=UpperCAmelCase__ ) lowerCAmelCase = GenerationConfig.from_pretrained(UpperCAmelCase__ , config_name=UpperCAmelCase__ ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , UpperCAmelCase__ ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 5_0 ) self.assertEqual(loaded_config.max_length , 2_0 ) self.assertEqual(loaded_config.max_time , UpperCAmelCase__ ) def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: lowerCAmelCase = AutoConfig.from_pretrained('gpt2' ) lowerCAmelCase = GenerationConfig.from_model_config(UpperCAmelCase__ ) lowerCAmelCase = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(UpperCAmelCase__ , UpperCAmelCase__ ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def __UpperCAmelCase ( self : str ) -> Dict: lowerCAmelCase = GenerationConfig() lowerCAmelCase = { 'max_new_tokens': 1_0_2_4, 'foo': 'bar', } lowerCAmelCase = copy.deepcopy(UpperCAmelCase__ ) lowerCAmelCase = generation_config.update(**UpperCAmelCase__ ) # update_kwargs was not modified (no side effects) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(UpperCAmelCase__ , {'foo': 'bar'} ) def __UpperCAmelCase ( self : int ) -> Any: lowerCAmelCase = GenerationConfig() lowerCAmelCase = 'bar' with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir: generation_config.save_pretrained(UpperCAmelCase__ ) lowerCAmelCase = GenerationConfig.from_pretrained(UpperCAmelCase__ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , 'bar' ) lowerCAmelCase = GenerationConfig.from_model_config(UpperCAmelCase__ ) assert not hasattr(UpperCAmelCase__ , 'foo' ) # no new kwargs should be initialized if from config def __UpperCAmelCase ( self : int ) -> int: lowerCAmelCase = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , UpperCAmelCase__ ) self.assertEqual(default_config.num_beams , 1 ) lowerCAmelCase = GenerationConfig( do_sample=UpperCAmelCase__ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , UpperCAmelCase__ ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(UpperCAmelCase__ ) lowerCAmelCase = GenerationConfig.from_pretrained(UpperCAmelCase__ , temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , UpperCAmelCase__ ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class UpperCAmelCase_ ( unittest.TestCase ): @classmethod def __UpperCAmelCase ( cls : Dict ) -> List[str]: lowerCAmelCase = TOKEN HfFolder.save_token(UpperCAmelCase__ ) @classmethod def __UpperCAmelCase ( cls : str ) -> str: try: delete_repo(token=cls._token , repo_id='test-generation-config' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' ) except HTTPError: pass def __UpperCAmelCase ( self : int ) -> List[Any]: lowerCAmelCase = GenerationConfig( do_sample=UpperCAmelCase__ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('test-generation-config' , use_auth_token=self._token ) lowerCAmelCase = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase__ , getattr(UpperCAmelCase__ , UpperCAmelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id='test-generation-config' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( UpperCAmelCase__ , repo_id='test-generation-config' , push_to_hub=UpperCAmelCase__ , use_auth_token=self._token ) lowerCAmelCase = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase__ , getattr(UpperCAmelCase__ , UpperCAmelCase__ ) ) def __UpperCAmelCase ( self : Dict ) -> Optional[int]: lowerCAmelCase = GenerationConfig( do_sample=UpperCAmelCase__ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token ) lowerCAmelCase = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase__ , getattr(UpperCAmelCase__ , UpperCAmelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( UpperCAmelCase__ , repo_id='valid_org/test-generation-config-org' , push_to_hub=UpperCAmelCase__ , use_auth_token=self._token ) lowerCAmelCase = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase__ , getattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
4
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase ): def __UpperCAmelCase ( self : Optional[int] ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self : Tuple ) -> Any: lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' ) lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) sd_pipe.set_scheduler('sample_euler' ) lowerCAmelCase = 'A painting of a squirrel eating a burger' lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' ) lowerCAmelCase = output.images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) lowerCAmelCase = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __UpperCAmelCase ( self : List[str] ) -> Dict: lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) sd_pipe.set_scheduler('sample_euler' ) lowerCAmelCase = 'A painting of a squirrel eating a burger' lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' ) lowerCAmelCase = output.images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) lowerCAmelCase = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1 def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]: lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) sd_pipe.set_scheduler('sample_dpmpp_2m' ) lowerCAmelCase = 'A painting of a squirrel eating a burger' lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = sd_pipe( [prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=1_5 , output_type='np' , use_karras_sigmas=UpperCAmelCase__ , ) lowerCAmelCase = output.images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) lowerCAmelCase = np.array( [0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
4
1
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase ( self ): lowercase_ :Dict = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' ) lowercase_ :int = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] ) # The dog is cute and lives in the garden house lowercase_ :Any = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim lowercase_ :str = torch.tensor( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): lowercase_ :Optional[Any] = model(UpperCamelCase_ )['''last_hidden_state'''].detach() self.assertEqual(output.shape , UpperCamelCase_ ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1E-3 ) ) @slow def UpperCamelCase ( self ): lowercase_ :List[str] = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' ) lowercase_ :str = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] ) # The dog is cute and lives in the garden house lowercase_ :Union[str, Any] = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim lowercase_ :Optional[int] = torch.tensor( [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): lowercase_ :str = model(UpperCamelCase_ )['''last_hidden_state'''].detach() self.assertEqual(output.shape , UpperCamelCase_ ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1E-3 ) )
252
from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers SCREAMING_SNAKE_CASE : Dict = [ "python", "tqdm", "regex", "requests", "packaging", "filelock", "numpy", "tokenizers", "huggingface-hub", "safetensors", "accelerate", "pyyaml", ] for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed elif pkg == "accelerate": # must be loaded here, or else tqdm check may fail from .utils import is_accelerate_available # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of # Transformers with PyTorch if not is_accelerate_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") def UpperCamelCase ( _a , _a=None ) -> Optional[int]: '''simple docstring''' require_version(deps[pkg] , _a )
252
1
from maths.prime_factors import prime_factors def A ( _lowerCamelCase ): '''simple docstring''' if not isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCAmelCase : int = F"Input value of [number={number}] must be an integer" raise TypeError(_lowerCamelCase ) if number < 1: raise ValueError("Input must be a positive integer" ) return -1 if len(prime_factors(_lowerCamelCase ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
36
A_ : List[Any] = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []} A_ : int = ['a', 'b', 'c', 'd', 'e'] def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: '''simple docstring''' __UpperCAmelCase = start # add current to visited visited.append(SCREAMING_SNAKE_CASE ) __UpperCAmelCase = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: __UpperCAmelCase = topological_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # if all neighbors visited add current to sort sort.append(SCREAMING_SNAKE_CASE ) # if all vertices haven't been visited select a new one to visit if len(SCREAMING_SNAKE_CASE ) != len(SCREAMING_SNAKE_CASE ): for vertice in vertices: if vertice not in visited: __UpperCAmelCase = topological_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # return sort return sort if __name__ == "__main__": A_ : Tuple = topological_sort('a', [], []) print(sort)
333
0
from decimal import Decimal, getcontext from math import ceil, factorial def __lowerCamelCase ( __lowerCAmelCase : int ) -> str: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise TypeError("""Undefined for non-integers""" ) elif precision < 1: raise ValueError("""Undefined for non-natural numbers""" ) snake_case = precision snake_case = ceil(precision / 14 ) snake_case = 42_68_80 * Decimal(1_00_05 ).sqrt() snake_case = 1 snake_case = 13_59_14_09 snake_case = Decimal(__lowerCAmelCase ) for k in range(1 , __lowerCAmelCase ): snake_case = factorial(6 * k ) // (factorial(3 * k ) * factorial(__lowerCAmelCase ) ** 3) linear_term += 5_45_14_01_34 exponential_term *= -26_25_37_41_26_40_76_80_00 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": _SCREAMING_SNAKE_CASE = 50 print(F"""The first {n} digits of pi is: {pi(n)}""")
359
'''simple docstring''' import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin _SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece.model") _SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece_bpe.model") _SCREAMING_SNAKE_CASE = "pt" if is_torch_available() else "tf" @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( A__ , unittest.TestCase ): """simple docstring""" snake_case_ = CamembertTokenizer snake_case_ = CamembertTokenizerFast snake_case_ = True snake_case_ = True def lowerCAmelCase ( self : Union[str, Any] )-> List[Any]: super().setUp() # We have a SentencePiece fixture for testing snake_case = CamembertTokenizer(__snake_case ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase ( self : Tuple )-> List[Any]: snake_case = """<pad>""" snake_case = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case ) def lowerCAmelCase ( self : Dict )-> Optional[Any]: snake_case = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-1] , """<mask>""" ) self.assertEqual(len(__snake_case ) , 10_04 ) def lowerCAmelCase ( self : List[str] )-> Any: self.assertEqual(self.get_tokenizer().vocab_size , 10_05 ) def lowerCAmelCase ( self : List[str] )-> List[str]: snake_case = CamembertTokenizer(__snake_case ) tokenizer.save_pretrained(self.tmpdirname ) snake_case = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) snake_case = """I was born in 92000, and this is falsé.""" snake_case = tokenizer.encode(__snake_case ) snake_case = rust_tokenizer.encode(__snake_case ) self.assertListEqual(__snake_case , __snake_case ) snake_case = tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) snake_case = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) self.assertListEqual(__snake_case , __snake_case ) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) snake_case = tokenizer.convert_ids_to_tokens(__snake_case ) snake_case = rust_tokenizer.tokenize(__snake_case ) self.assertListEqual(__snake_case , __snake_case ) def lowerCAmelCase ( self : str )-> Any: if not self.test_rust_tokenizer: return snake_case = self.get_tokenizer() snake_case = self.get_rust_tokenizer() snake_case = """I was born in 92000, and this is falsé.""" snake_case = tokenizer.tokenize(__snake_case ) snake_case = rust_tokenizer.tokenize(__snake_case ) self.assertListEqual(__snake_case , __snake_case ) snake_case = tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) snake_case = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) self.assertListEqual(__snake_case , __snake_case ) snake_case = self.get_rust_tokenizer() snake_case = tokenizer.encode(__snake_case ) snake_case = rust_tokenizer.encode(__snake_case ) self.assertListEqual(__snake_case , __snake_case ) @slow def lowerCAmelCase ( self : Any )-> Optional[int]: # fmt: off snake_case = {"""input_ids""": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. snake_case = [ """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """ """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""", """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """ """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """ """telles que la traduction et la synthèse de texte.""", ] self.tokenizer_integration_test_util( expected_encoding=__snake_case , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=__snake_case , )
3
0
'''simple docstring''' from math import ceil def snake_case ( UpperCAmelCase , UpperCAmelCase )-> str: """simple docstring""" __A = list(range(0 , UpperCAmelCase ) ) __A = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check __A = [] for i in device_map_blocks: if device_map_blocks.count(UpperCAmelCase ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(UpperCAmelCase ) # Missing blocks __A = [i for i in blocks if i not in device_map_blocks] __A = [i for i in device_map_blocks if i not in blocks] if len(UpperCAmelCase ) != 0: raise ValueError( 'Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.' ' These attention blocks were specified more than once: ' + str(UpperCAmelCase ) ) if len(UpperCAmelCase ) != 0: raise ValueError( 'There are attention blocks for this model that are not specified in the device_map. Add these attention ' 'blocks to a device on the device_map: ' + str(UpperCAmelCase ) ) if len(UpperCAmelCase ) != 0: raise ValueError( 'The device_map contains more attention blocks than this model has. Remove these from the device_map:' + str(UpperCAmelCase ) ) def snake_case ( UpperCAmelCase , UpperCAmelCase )-> Union[str, Any]: """simple docstring""" __A = list(range(UpperCAmelCase ) ) __A = int(ceil(n_layers / len(UpperCAmelCase ) ) ) __A = [layers[i : i + n_blocks] for i in range(0 , UpperCAmelCase , UpperCAmelCase )] return dict(zip(UpperCAmelCase , UpperCAmelCase ) )
161
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING a__ : List[Any] = logging.get_logger(__name__) a__ : Union[str, Any] = { "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json", } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE): UpperCAmelCase__ : List[str] = 'instructblip_vision_model' def __init__( self :List[str] , _A :str=1_408 , _A :List[str]=6_144 , _A :List[Any]=39 , _A :Optional[Any]=16 , _A :Tuple=224 , _A :Tuple=14 , _A :Tuple="gelu" , _A :Optional[Any]=1E-6 , _A :List[Any]=0.0 , _A :Dict=1E-10 , _A :List[str]=True , **_A :Dict , ) -> Dict: '''simple docstring''' super().__init__(**_A ) __A = hidden_size __A = intermediate_size __A = num_hidden_layers __A = num_attention_heads __A = patch_size __A = image_size __A = initializer_range __A = attention_dropout __A = layer_norm_eps __A = hidden_act __A = qkv_bias @classmethod def lowercase_ ( cls :Any , _A :Union[str, os.PathLike] , **_A :Tuple ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(_A ) __A , __A = cls.get_config_dict(_A , **_A ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": __A = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(_A , **_A ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE): UpperCAmelCase__ : List[str] = 'instructblip_qformer' def __init__( self :Tuple , _A :int=30_522 , _A :List[str]=768 , _A :str=12 , _A :Optional[Any]=12 , _A :Union[str, Any]=3_072 , _A :str="gelu" , _A :Tuple=0.1 , _A :Dict=0.1 , _A :Dict=512 , _A :Union[str, Any]=0.02 , _A :int=1E-12 , _A :str=0 , _A :Union[str, Any]="absolute" , _A :List[str]=2 , _A :Optional[Any]=1_408 , **_A :Any , ) -> Optional[Any]: '''simple docstring''' super().__init__(pad_token_id=_A , **_A ) __A = vocab_size __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = hidden_act __A = intermediate_size __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = max_position_embeddings __A = initializer_range __A = layer_norm_eps __A = position_embedding_type __A = cross_attention_frequency __A = encoder_hidden_size @classmethod def lowercase_ ( cls :int , _A :Union[str, os.PathLike] , **_A :int ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(_A ) __A , __A = cls.get_config_dict(_A , **_A ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": __A = config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(_A , **_A ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE): UpperCAmelCase__ : Any = 'instructblip' UpperCAmelCase__ : List[Any] = True def __init__( self :Dict , _A :int=None , _A :Optional[Any]=None , _A :Optional[Any]=None , _A :Optional[Any]=32 , **_A :List[Any] ) -> Tuple: '''simple docstring''' super().__init__(**_A ) if vision_config is None: __A = {} logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' ) if qformer_config is None: __A = {} logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' ) if text_config is None: __A = {} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' ) __A = InstructBlipVisionConfig(**_A ) __A = InstructBlipQFormerConfig(**_A ) __A = text_config['model_type'] if 'model_type' in text_config else 'opt' __A = CONFIG_MAPPING[text_model_type](**_A ) __A = self.text_config.tie_word_embeddings __A = self.text_config.is_encoder_decoder __A = num_query_tokens __A = self.vision_config.hidden_size __A = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES __A = 1.0 __A = 0.02 @classmethod def lowercase_ ( cls :int , _A :InstructBlipVisionConfig , _A :InstructBlipQFormerConfig , _A :PretrainedConfig , **_A :Any , ) -> Any: '''simple docstring''' return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_A , ) def lowercase_ ( self :int ) -> Tuple: '''simple docstring''' __A = copy.deepcopy(self.__dict__ ) __A = self.vision_config.to_dict() __A = self.qformer_config.to_dict() __A = self.text_config.to_dict() __A = self.__class__.model_type return output
161
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) SCREAMING_SNAKE_CASE_ = { """configuration_owlvit""": [ """OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OwlViTConfig""", """OwlViTOnnxConfig""", """OwlViTTextConfig""", """OwlViTVisionConfig""", ], """processing_owlvit""": ["""OwlViTProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = ["""OwlViTFeatureExtractor"""] SCREAMING_SNAKE_CASE_ = ["""OwlViTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ """OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """OwlViTModel""", """OwlViTPreTrainedModel""", """OwlViTTextModel""", """OwlViTVisionModel""", """OwlViTForObjectDetection""", ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
360
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) SCREAMING_SNAKE_CASE_ = """\ Text data. Second line of data.""" SCREAMING_SNAKE_CASE_ = """file""" @pytest.fixture(scope="""session""" ) def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""") SCREAMING_SNAKE_CASE = bytes(_SCREAMING_SNAKE_CASE , """utf-8""" ) with zstd.open(_SCREAMING_SNAKE_CASE , """wb""" ) as f: f.write(_SCREAMING_SNAKE_CASE ) return path @pytest.fixture def __lowercase ( _SCREAMING_SNAKE_CASE ) -> List[Any]: '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir , _SCREAMING_SNAKE_CASE ) , """w""" ) as f: f.write(_SCREAMING_SNAKE_CASE ) return FILE_PATH @pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] ) def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path} SCREAMING_SNAKE_CASE = input_paths[compression_format] SCREAMING_SNAKE_CASE = tmp_path / """cache""" SCREAMING_SNAKE_CASE = DownloadConfig(cache_dir=_SCREAMING_SNAKE_CASE , extract_compressed_file=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = cached_path(_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE ) with open(_SCREAMING_SNAKE_CASE ) as f: SCREAMING_SNAKE_CASE = f.read() with open(_SCREAMING_SNAKE_CASE ) as f: SCREAMING_SNAKE_CASE = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize("""default_extracted""" , [True, False] ) @pytest.mark.parametrize("""default_cache_dir""" , [True, False] ) def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = """custom_cache""" SCREAMING_SNAKE_CASE = """custom_extracted_dir""" SCREAMING_SNAKE_CASE = tmp_path / """custom_extracted_path""" if default_extracted: SCREAMING_SNAKE_CASE = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""") else: monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , _SCREAMING_SNAKE_CASE ) monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(_SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) SCREAMING_SNAKE_CASE = xz_file SCREAMING_SNAKE_CASE = ( DownloadConfig(extract_compressed_file=_SCREAMING_SNAKE_CASE ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE = cached_path(_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE ) assert Path(_SCREAMING_SNAKE_CASE ).parent.parts[-2:] == expected def __lowercase ( _SCREAMING_SNAKE_CASE ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = str(Path(_SCREAMING_SNAKE_CASE ).resolve() ) assert cached_path(_SCREAMING_SNAKE_CASE ) == text_file # relative path SCREAMING_SNAKE_CASE = str(Path(_SCREAMING_SNAKE_CASE ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(_SCREAMING_SNAKE_CASE ) == text_file def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = str(tmp_path.resolve() / """__missing_file__.txt""" ) with pytest.raises(_SCREAMING_SNAKE_CASE ): cached_path(_SCREAMING_SNAKE_CASE ) # relative path SCREAMING_SNAKE_CASE = """./__missing_file__.txt""" with pytest.raises(_SCREAMING_SNAKE_CASE ): cached_path(_SCREAMING_SNAKE_CASE ) def __lowercase ( _SCREAMING_SNAKE_CASE ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = get_from_cache(F"""tmp://{tmpfs_file}""" ) with open(_SCREAMING_SNAKE_CASE ) as f: SCREAMING_SNAKE_CASE = f.read() assert output_file_content == FILE_CONTENT @patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE ) def __lowercase ( ) -> Dict: '''simple docstring''' with pytest.raises(_SCREAMING_SNAKE_CASE ): cached_path("""https://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE ) def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(_SCREAMING_SNAKE_CASE ): http_get("""https://huggingface.co""" , temp_file=_SCREAMING_SNAKE_CASE ) with pytest.raises(_SCREAMING_SNAKE_CASE ): http_head("""https://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE ) def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(_SCREAMING_SNAKE_CASE ): ftp_get("""ftp://huggingface.co""" , temp_file=_SCREAMING_SNAKE_CASE ) with pytest.raises(_SCREAMING_SNAKE_CASE ): ftp_head("""ftp://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE ) def __lowercase ( _SCREAMING_SNAKE_CASE ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(_SCREAMING_SNAKE_CASE ): fsspec_get("""s3://huggingface.co""" , temp_file=_SCREAMING_SNAKE_CASE ) with pytest.raises(_SCREAMING_SNAKE_CASE ): fsspec_head("""s3://huggingface.co""" )
193
0
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __UpperCAmelCase (__lowercase ,unittest.TestCase ): __snake_case : str = DiTPipeline __snake_case : List[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS __snake_case : str = PipelineTesterMixin.required_optional_params - { """latents""", """num_images_per_prompt""", """callback""", """callback_steps""", } __snake_case : List[str] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS __snake_case : int = False def UpperCamelCase ( self: Any ): '''simple docstring''' torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=SCREAMING_SNAKE_CASE__ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=SCREAMING_SNAKE_CASE__ , ) _SCREAMING_SNAKE_CASE = AutoencoderKL() _SCREAMING_SNAKE_CASE = DDIMScheduler() _SCREAMING_SNAKE_CASE = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler} return components def UpperCamelCase ( self: Any , UpperCAmelCase_: str , UpperCAmelCase_: Any=0 ): '''simple docstring''' if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ): _SCREAMING_SNAKE_CASE = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: _SCREAMING_SNAKE_CASE = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) _SCREAMING_SNAKE_CASE = { """class_labels""": [1], """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def UpperCamelCase ( self: Tuple ): '''simple docstring''' _SCREAMING_SNAKE_CASE = """cpu""" _SCREAMING_SNAKE_CASE = self.get_dummy_components() _SCREAMING_SNAKE_CASE = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) _SCREAMING_SNAKE_CASE = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) _SCREAMING_SNAKE_CASE = pipe(**SCREAMING_SNAKE_CASE__ ).images _SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _SCREAMING_SNAKE_CASE = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] ) _SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(SCREAMING_SNAKE_CASE__ , 1E-3 ) def UpperCamelCase ( self: Tuple ): '''simple docstring''' self._test_inference_batch_single_identical(relax_max_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def UpperCamelCase ( self: Any ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class __UpperCAmelCase (unittest.TestCase ): def UpperCamelCase ( self: Union[str, Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase ( self: Dict ): '''simple docstring''' _SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) _SCREAMING_SNAKE_CASE = ["""vase""", """umbrella""", """white shark""", """white wolf"""] _SCREAMING_SNAKE_CASE = pipe.get_label_ids(SCREAMING_SNAKE_CASE__ ) _SCREAMING_SNAKE_CASE = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=40 , output_type="""np""" ).images for word, image in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): _SCREAMING_SNAKE_CASE = load_numpy( F'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' ) assert np.abs((expected_image - image).max() ) < 1E-2 def UpperCamelCase ( self: Dict ): '''simple docstring''' _SCREAMING_SNAKE_CASE = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) _SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) _SCREAMING_SNAKE_CASE = ["""vase""", """umbrella"""] _SCREAMING_SNAKE_CASE = pipe.get_label_ids(SCREAMING_SNAKE_CASE__ ) _SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=25 , output_type="""np""" ).images for word, image in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): _SCREAMING_SNAKE_CASE = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" F'/dit/{word}_512.npy' ) assert np.abs((expected_image - image).max() ) < 1E-1
306
from queue import PriorityQueue from typing import Any import numpy as np def __magic_name__ ( __lowerCAmelCase : dict , __lowerCAmelCase : str , __lowerCAmelCase : set , __lowerCAmelCase : set , __lowerCAmelCase : dict , __lowerCAmelCase : dict , __lowerCAmelCase : PriorityQueue , __lowerCAmelCase : dict , __lowerCAmelCase : float | int , ) -> float | int: for nxt, d in graph[v]: if nxt in visited_forward: continue __lowerCamelCase = cst_fwd.get(__lowerCAmelCase , np.inf ) __lowerCamelCase = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) __lowerCamelCase = new_cost_f __lowerCamelCase = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: __lowerCamelCase = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : dict , __lowerCAmelCase : dict ) -> int: __lowerCamelCase = -1 __lowerCamelCase = set() __lowerCamelCase = set() __lowerCamelCase = {source: 0} __lowerCamelCase = {destination: 0} __lowerCamelCase = {source: None} __lowerCamelCase = {destination: None} __lowerCamelCase = PriorityQueue() __lowerCamelCase = PriorityQueue() __lowerCamelCase = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): __lowerCamelCase , __lowerCamelCase = queue_forward.get() visited_forward.add(__lowerCAmelCase ) __lowerCamelCase , __lowerCamelCase = queue_backward.get() visited_backward.add(__lowerCAmelCase ) __lowerCamelCase = pass_and_relaxation( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) __lowerCamelCase = pass_and_relaxation( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: __lowerCamelCase = shortest_distance return shortest_path_distance SCREAMING_SNAKE_CASE__ : List[Any] = { "B": [["C", 1]], "C": [["D", 1]], "D": [["F", 1]], "E": [["B", 1], ["G", 2]], "F": [], "G": [["F", 1]], } SCREAMING_SNAKE_CASE__ : Optional[int] = { "B": [["E", 1]], "C": [["B", 1]], "D": [["C", 1]], "F": [["D", 1], ["G", 1]], "E": [[None, np.inf]], "G": [["E", 2]], } if __name__ == "__main__": import doctest doctest.testmod()
270
0
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class __a( _a ): """simple docstring""" lowerCAmelCase = 42 class __a( _a , _a ): """simple docstring""" lowerCAmelCase = True @register_to_config def __init__( self ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = ("DownEncoderBlock2D",) ,_SCREAMING_SNAKE_CASE = ("UpDecoderBlock2D",) ,_SCREAMING_SNAKE_CASE = (64,) ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = "silu" ,_SCREAMING_SNAKE_CASE = 4 ,_SCREAMING_SNAKE_CASE = 32 ,_SCREAMING_SNAKE_CASE = 32 ,_SCREAMING_SNAKE_CASE = 0.1_82_15 ,) -> Optional[int]: super().__init__() # pass init params to Encoder UpperCAmelCase_ : List[Any] = Encoder( in_channels=_SCREAMING_SNAKE_CASE ,out_channels=_SCREAMING_SNAKE_CASE ,down_block_types=_SCREAMING_SNAKE_CASE ,block_out_channels=_SCREAMING_SNAKE_CASE ,layers_per_block=_SCREAMING_SNAKE_CASE ,act_fn=_SCREAMING_SNAKE_CASE ,norm_num_groups=_SCREAMING_SNAKE_CASE ,double_z=_SCREAMING_SNAKE_CASE ,) # pass init params to Decoder UpperCAmelCase_ : List[str] = Decoder( in_channels=_SCREAMING_SNAKE_CASE ,out_channels=_SCREAMING_SNAKE_CASE ,up_block_types=_SCREAMING_SNAKE_CASE ,block_out_channels=_SCREAMING_SNAKE_CASE ,layers_per_block=_SCREAMING_SNAKE_CASE ,norm_num_groups=_SCREAMING_SNAKE_CASE ,act_fn=_SCREAMING_SNAKE_CASE ,) UpperCAmelCase_ : int = nn.Convad(2 * latent_channels ,2 * latent_channels ,1 ) UpperCAmelCase_ : Union[str, Any] = nn.Convad(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,1 ) UpperCAmelCase_ : Optional[Any] = False UpperCAmelCase_ : Dict = False # only relevant if vae tiling is enabled UpperCAmelCase_ : List[Any] = self.config.sample_size UpperCAmelCase_ : List[str] = ( self.config.sample_size[0] if isinstance(self.config.sample_size ,(list, tuple) ) else self.config.sample_size ) UpperCAmelCase_ : List[str] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) UpperCAmelCase_ : int = 0.25 def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> int: if isinstance(_SCREAMING_SNAKE_CASE ,(Encoder, Decoder) ): UpperCAmelCase_ : Union[str, Any] = value def a__ ( self ,_SCREAMING_SNAKE_CASE = True ) -> Optional[Any]: UpperCAmelCase_ : Dict = use_tiling def a__ ( self ) -> Optional[Any]: self.enable_tiling(_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> List[str]: UpperCAmelCase_ : str = True def a__ ( self ) -> Any: UpperCAmelCase_ : Any = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def a__ ( self ) -> Dict[str, AttentionProcessor]: UpperCAmelCase_ : int = {} def fn_recursive_add_processors(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): if hasattr(_SCREAMING_SNAKE_CASE ,'''set_processor''' ): UpperCAmelCase_ : Tuple = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f'''{name}.{sub_name}''' ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) return processors for name, module in self.named_children(): fn_recursive_add_processors(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) return processors def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = len(self.attn_processors.keys() ) if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) != count: raise ValueError( f'''A dict of processors was passed, but the number of processors {len(_SCREAMING_SNAKE_CASE )} does not match the''' f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' ) def fn_recursive_attn_processor(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): if hasattr(_SCREAMING_SNAKE_CASE ,'''set_processor''' ): if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): module.set_processor(_SCREAMING_SNAKE_CASE ) else: module.set_processor(processor.pop(f'''{name}.processor''' ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f'''{name}.{sub_name}''' ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) for name, module in self.named_children(): fn_recursive_attn_processor(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Tuple: self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = True ) -> AutoencoderKLOutput: if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE ) if self.use_slicing and x.shape[0] > 1: UpperCAmelCase_ : int = [self.encoder(_SCREAMING_SNAKE_CASE ) for x_slice in x.split(1 )] UpperCAmelCase_ : List[str] = torch.cat(_SCREAMING_SNAKE_CASE ) else: UpperCAmelCase_ : Optional[Any] = self.encoder(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = self.quant_conv(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = DiagonalGaussianDistribution(_SCREAMING_SNAKE_CASE ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = True ) -> Union[DecoderOutput, torch.FloatTensor]: if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = self.post_quant_conv(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = self.decoder(_SCREAMING_SNAKE_CASE ) if not return_dict: return (dec,) return DecoderOutput(sample=_SCREAMING_SNAKE_CASE ) @apply_forward_hook def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = True ) -> Union[DecoderOutput, torch.FloatTensor]: if self.use_slicing and z.shape[0] > 1: UpperCAmelCase_ : Tuple = [self._decode(_SCREAMING_SNAKE_CASE ).sample for z_slice in z.split(1 )] UpperCAmelCase_ : str = torch.cat(_SCREAMING_SNAKE_CASE ) else: UpperCAmelCase_ : str = self._decode(_SCREAMING_SNAKE_CASE ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]: UpperCAmelCase_ : str = min(a.shape[2] ,b.shape[2] ,_SCREAMING_SNAKE_CASE ) for y in range(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int: UpperCAmelCase_ : Any = min(a.shape[3] ,b.shape[3] ,_SCREAMING_SNAKE_CASE ) for x in range(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : List[str] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = True ) -> AutoencoderKLOutput: UpperCAmelCase_ : Optional[int] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) UpperCAmelCase_ : Any = int(self.tile_latent_min_size * self.tile_overlap_factor ) UpperCAmelCase_ : str = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. UpperCAmelCase_ : Tuple = [] for i in range(0 ,x.shape[2] ,_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : List[str] = [] for j in range(0 ,x.shape[3] ,_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : List[str] = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] UpperCAmelCase_ : int = self.encoder(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = self.quant_conv(_SCREAMING_SNAKE_CASE ) row.append(_SCREAMING_SNAKE_CASE ) rows.append(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = [] for i, row in enumerate(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : str = [] for j, tile in enumerate(_SCREAMING_SNAKE_CASE ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: UpperCAmelCase_ : Optional[Any] = self.blend_v(rows[i - 1][j] ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if j > 0: UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(_SCREAMING_SNAKE_CASE ,dim=3 ) ) UpperCAmelCase_ : Optional[Any] = torch.cat(_SCREAMING_SNAKE_CASE ,dim=2 ) UpperCAmelCase_ : Union[str, Any] = DiagonalGaussianDistribution(_SCREAMING_SNAKE_CASE ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = True ) -> Union[DecoderOutput, torch.FloatTensor]: UpperCAmelCase_ : Optional[Any] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) UpperCAmelCase_ : str = int(self.tile_sample_min_size * self.tile_overlap_factor ) UpperCAmelCase_ : Any = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. UpperCAmelCase_ : Tuple = [] for i in range(0 ,z.shape[2] ,_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Tuple = [] for j in range(0 ,z.shape[3] ,_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Optional[int] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] UpperCAmelCase_ : List[str] = self.post_quant_conv(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = self.decoder(_SCREAMING_SNAKE_CASE ) row.append(_SCREAMING_SNAKE_CASE ) rows.append(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[Any] = [] for i, row in enumerate(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : int = [] for j, tile in enumerate(_SCREAMING_SNAKE_CASE ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: UpperCAmelCase_ : Optional[int] = self.blend_v(rows[i - 1][j] ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if j > 0: UpperCAmelCase_ : Optional[int] = self.blend_h(row[j - 1] ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(_SCREAMING_SNAKE_CASE ,dim=3 ) ) UpperCAmelCase_ : Dict = torch.cat(_SCREAMING_SNAKE_CASE ,dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,) -> Union[DecoderOutput, torch.FloatTensor]: UpperCAmelCase_ : str = sample UpperCAmelCase_ : Optional[Any] = self.encode(_SCREAMING_SNAKE_CASE ).latent_dist if sample_posterior: UpperCAmelCase_ : List[Any] = posterior.sample(generator=_SCREAMING_SNAKE_CASE ) else: UpperCAmelCase_ : Union[str, Any] = posterior.mode() UpperCAmelCase_ : List[Any] = self.decode(_SCREAMING_SNAKE_CASE ).sample if not return_dict: return (dec,) return DecoderOutput(sample=_SCREAMING_SNAKE_CASE )
235
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __a = logging.get_logger(__name__) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if isinstance(_lowercase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(_lowercase , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(_lowercase ): return [[videos]] raise ValueError(f'''Could not make batched video from {videos}''' ) class __a( _a ): """simple docstring""" lowerCAmelCase = ['''pixel_values'''] def __init__( self ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = 1 / 255 ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> None: super().__init__(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = size if size is not None else {'''shortest_edge''': 224} UpperCAmelCase_ : Any = get_size_dict(_SCREAMING_SNAKE_CASE ,default_to_square=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} UpperCAmelCase_ : List[str] = get_size_dict(_SCREAMING_SNAKE_CASE ,param_name='''crop_size''' ) UpperCAmelCase_ : str = do_resize UpperCAmelCase_ : Union[str, Any] = size UpperCAmelCase_ : int = do_center_crop UpperCAmelCase_ : List[str] = crop_size UpperCAmelCase_ : Optional[int] = resample UpperCAmelCase_ : List[Any] = do_rescale UpperCAmelCase_ : Tuple = rescale_factor UpperCAmelCase_ : Optional[Any] = do_normalize UpperCAmelCase_ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray: UpperCAmelCase_ : Optional[int] = get_size_dict(_SCREAMING_SNAKE_CASE ,default_to_square=_SCREAMING_SNAKE_CASE ) if "shortest_edge" in size: UpperCAmelCase_ : Dict = get_resize_output_image_size(_SCREAMING_SNAKE_CASE ,size['''shortest_edge'''] ,default_to_square=_SCREAMING_SNAKE_CASE ) elif "height" in size and "width" in size: UpperCAmelCase_ : Tuple = (size['''height'''], size['''width''']) else: raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' ) return resize(_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ,resample=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray: UpperCAmelCase_ : str = get_size_dict(_SCREAMING_SNAKE_CASE ) if "height" not in size or "width" not in size: raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' ) return center_crop(_SCREAMING_SNAKE_CASE ,size=(size['''height'''], size['''width''']) ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> Dict: return rescale(_SCREAMING_SNAKE_CASE ,scale=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray: return normalize(_SCREAMING_SNAKE_CASE ,mean=_SCREAMING_SNAKE_CASE ,std=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = ChannelDimension.FIRST ,) -> np.ndarray: if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. UpperCAmelCase_ : Any = to_numpy_array(_SCREAMING_SNAKE_CASE ) if do_resize: UpperCAmelCase_ : Union[str, Any] = self.resize(image=_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ,resample=_SCREAMING_SNAKE_CASE ) if do_center_crop: UpperCAmelCase_ : Optional[int] = self.center_crop(_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ) if do_rescale: UpperCAmelCase_ : str = self.rescale(image=_SCREAMING_SNAKE_CASE ,scale=_SCREAMING_SNAKE_CASE ) if do_normalize: UpperCAmelCase_ : List[Any] = self.normalize(image=_SCREAMING_SNAKE_CASE ,mean=_SCREAMING_SNAKE_CASE ,std=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = to_channel_dimension_format(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) return image def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = ChannelDimension.FIRST ,**_SCREAMING_SNAKE_CASE ,) -> PIL.Image.Image: UpperCAmelCase_ : Dict = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ : int = resample if resample is not None else self.resample UpperCAmelCase_ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase_ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ : Tuple = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ : Optional[int] = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ : Optional[int] = image_std if image_std is not None else self.image_std UpperCAmelCase_ : List[str] = size if size is not None else self.size UpperCAmelCase_ : Optional[int] = get_size_dict(_SCREAMING_SNAKE_CASE ,default_to_square=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = crop_size if crop_size is not None else self.crop_size UpperCAmelCase_ : Any = get_size_dict(_SCREAMING_SNAKE_CASE ,param_name='''crop_size''' ) if not valid_images(_SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) UpperCAmelCase_ : List[Any] = make_batched(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = [ [ self._preprocess_image( image=_SCREAMING_SNAKE_CASE ,do_resize=_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ,resample=_SCREAMING_SNAKE_CASE ,do_center_crop=_SCREAMING_SNAKE_CASE ,crop_size=_SCREAMING_SNAKE_CASE ,do_rescale=_SCREAMING_SNAKE_CASE ,rescale_factor=_SCREAMING_SNAKE_CASE ,do_normalize=_SCREAMING_SNAKE_CASE ,image_mean=_SCREAMING_SNAKE_CASE ,image_std=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,) for img in video ] for video in videos ] UpperCAmelCase_ : Any = {'''pixel_values''': videos} return BatchFeature(data=_SCREAMING_SNAKE_CASE ,tensor_type=_SCREAMING_SNAKE_CASE )
235
1