"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowercase = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_28,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ) -> str:
A = TOKEN
HfFolder.save_token(A_ )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ) -> Optional[int]:
try:
delete_repo(token=cls._token ,repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-config' )
except HTTPError:
pass
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
A = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('test-config' ,use_auth_token=self._token )
A = BertConfig.from_pretrained(F'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A_ ,getattr(A_ ,A_ ) )
# Reset repo
delete_repo(token=self._token ,repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A_ ,repo_id='test-config' ,push_to_hub=A_ ,use_auth_token=self._token )
A = BertConfig.from_pretrained(F'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A_ ,getattr(A_ ,A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
A = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' ,use_auth_token=self._token )
A = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A_ ,getattr(A_ ,A_ ) )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A_ ,repo_id='valid_org/test-config-org' ,push_to_hub=A_ ,use_auth_token=self._token )
A = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A_ ,getattr(A_ ,A_ ) )
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
CustomConfig.register_for_auto_class()
A = CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map ,{'AutoConfig': 'custom_configuration.CustomConfig'} )
A = AutoConfig.from_pretrained(F'{USER}/test-dynamic-config' ,trust_remote_code=A_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ ,'CustomConfig' )
self.assertEqual(new_config.attribute ,42 )
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
A = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
A = c.n_embd + 1 # int
A = c.resid_pdrop + 1.0 # float
A = not c.scale_attn_weights # bool
A = c.summary_type + 'foo' # str
c.update_from_string(
F'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' )
self.assertEqual(A_ ,c.n_embd ,'mismatch for key: n_embd' )
self.assertEqual(A_ ,c.resid_pdrop ,'mismatch for key: resid_pdrop' )
self.assertEqual(A_ ,c.scale_attn_weights ,'mismatch for key: scale_attn_weights' )
self.assertEqual(A_ ,c.summary_type ,'mismatch for key: summary_type' )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
A = PretrainedConfig()
A = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
A_ ,['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
A = [key for key, value in config_common_kwargs.items() if value == getattr(A_ ,A_ )]
if len(A_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F' {", ".join(A_ )}.' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
with self.assertRaises(A_ ):
# config is in subfolder, the following should not work without specifying the subfolder
A = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
A = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ,subfolder='bert' )
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
A = mock.Mock()
A = 500
A = {}
A = HTTPError
A = {}
# Download this model to make sure it's in the cache.
A = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' ,return_value=A_ ) as mock_head:
A = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
# This test is for deprecated behavior and can be removed in v5
A = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
A = AutoConfig.from_pretrained('bert-base-cased' )
A = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(A_ )
A = 2
json.dump(configuration.to_dict() ,open(os.path.join(A_ ,'config.4.0.0.json' ) ,'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
A = AutoConfig.from_pretrained(A_ )
self.assertEqual(new_configuration.hidden_size ,2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
A = ['config.42.0.0.json']
A = 768
configuration.save_pretrained(A_ )
shutil.move(os.path.join(A_ ,'config.4.0.0.json' ) ,os.path.join(A_ ,'config.42.0.0.json' ) )
A = AutoConfig.from_pretrained(A_ )
self.assertEqual(new_configuration.hidden_size ,768 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
A = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
A = 'v4.0.0'
A , A = new_transformers.models.auto.AutoConfig.from_pretrained(
A_ ,return_unused_kwargs=A_ )
self.assertEqual(new_configuration.hidden_size ,2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(A_ ,{} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
A = 'v3.0.0'
A = old_transformers.models.auto.AutoConfig.from_pretrained(A_ )
self.assertEqual(old_configuration.hidden_size ,768 )
# ----------------------------------------------------------------------
"""Pigeonhole sort for a list of integers."""
from __future__ import annotations


def pigeon_sort(array: list[int]) -> list[int]:
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Size of the range of values (the number of pigeonholes).
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Fill the pigeonholes.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Write the values back into the array in sorted order.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
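# --- Added note and sanity checks (not part of the original file) ---
# Pigeonhole sort runs in O(n + range) time and O(range) extra space, so it
# only pays off when max(array) - min(array) is small relative to len(array).
if __name__ == "__main__":
    assert pigeon_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
    assert pigeon_sort([-2, -5, -45]) == [-45, -5, -2]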
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__lowercase : int = datasets.load_iris()
__lowercase : Union[str, Any] = np.array(data["data"])
__lowercase : Any = np.array(data["target"])
__lowercase : Tuple = data["target_names"]
__lowercase ,__lowercase ,__lowercase ,__lowercase : Union[str, Any] = train_test_split(X, y)
def SCREAMING_SNAKE_CASE ( snake_case, snake_case):
return np.linalg.norm(np.array(snake_case) - np.array(snake_case))
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case, snake_case, snake_case=5):
__snake_case = zip(snake_case, snake_case)
# List of distances of all points from the point to be classified
__snake_case = []
for data_point in data:
__snake_case = euclidean_distance(data_point[0], snake_case)
distances.append((distance, data_point[1]))
# Choosing 'k' points with the least distances.
__snake_case = [i[1] for i in sorted(snake_case)[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
__snake_case = Counter(snake_case).most_common(1)[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
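# --- Added usage sketch (not part of the original file) ---
# A hedged way to gauge the classifier above: score it on the held-out split
# produced by train_test_split. `held_out_accuracy` is a hypothetical helper
# that only reuses names already defined in this module; the k value is arbitrary.
def held_out_accuracy(k: int = 5) -> float:
    correct = sum(
        classifier(X_train, y_train, classes, point, k=k) == classes[target]
        for point, target in zip(X_test, y_test)
    )
    return correct / len(X_test)


if __name__ == "__main__":
    print(f"held-out accuracy (k=5): {held_out_accuracy():.2f}")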
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( snake_case):
return 1 if digit in (0, 1) else (digit * factorial(digit - 1))
def SCREAMING_SNAKE_CASE ( snake_case):
__snake_case = 0
__snake_case = number
while duplicate > 0:
__snake_case , __snake_case = divmod(snake_case, 10)
fact_sum += factorial(snake_case)
return fact_sum == number
if __name__ == "__main__":
print("Program to check whether a number is a Krisnamurthy Number or not.")
__lowercase : Optional[int] = int(input("Enter number: ").strip())
print(
F"""{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."""
)
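# --- Added sanity checks (not part of the original file) ---
# The known base-10 Krishnamurthy (Peterson) numbers are 1, 2, 145 and 40585,
# e.g. 145 = 1! + 4! + 5! = 1 + 24 + 120.
if __name__ == "__main__":
    assert all(krishnamurthy(n) for n in (1, 2, 145, 40585))
    assert not krishnamurthy(144)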
# ----------------------------------------------------------------------
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
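# --- Added illustration (not part of the original file) ---
# `_LazyModule` above defers importing a submodule until one of its attributes
# is first accessed. A minimal self-contained sketch of the same idea, using a
# PEP 562 module-level `__getattr__` (all names here are hypothetical):
#
#     import importlib
#
#     _LAZY_ATTRS = {"Wav2Vec2PhonemeCTCTokenizer": ".tokenization_wav2vec2_phoneme"}
#
#     def __getattr__(name):
#         if name in _LAZY_ATTRS:
#             module = importlib.import_module(_LAZY_ATTRS[name], __package__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")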
# ----------------------------------------------------------------------
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
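# --- Added usage sketch (not part of the original file) ---
# The encoder above is just token + position embeddings feeding a stack of
# `T5Block`s. A hedged, standalone sketch of one such block, kept in comments
# because this module itself uses relative imports (tiny hyperparameters
# chosen arbitrarily; assumes only the public `transformers` T5 classes):
#
#     import torch
#     from transformers.models.t5.modeling_t5 import T5Block, T5Config
#
#     config = T5Config(
#         vocab_size=128, d_model=32, num_heads=2, d_kv=16, d_ff=64,
#         dropout_rate=0.1, feed_forward_proj="gated-gelu",
#         is_decoder=False, is_encoder_decoder=False,
#     )
#     block = T5Block(config)
#     hidden_states = torch.randn(1, 8, 32)   # (batch, seq_len, d_model)
#     out = block(hidden_states)[0]           # same shape as the input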
# ----------------------------------------------------------------------
"""MVP model configuration."""
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
# ----------------------------------------------------------------------
"""Manhattan (taxicab) distance between two points in n-dimensional space."""


def manhattan_distance(point_a: list, point_b: list) -> float:
    """Sum of the absolute coordinate differences between two points."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Same computation as manhattan_distance, written as a single expression."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
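# --- Added examples (not part of the original file) ---
# Quick sanity checks; both implementations above should agree.
if __name__ == "__main__":
    assert manhattan_distance([1, 1], [2, 2]) == 2.0
    assert manhattan_distance([1.5, 1.5], [0.5, -0.5]) == 3.0
    assert manhattan_distance_one_liner([1, 1], [2, 2]) == manhattan_distance([1, 1], [2, 2])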
# ----------------------------------------------------------------------
"""Maximum top-to-bottom path sum through the triangle in triangle.txt
(Project Euler style), solved bottom-up with dynamic programming."""
import os


def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    # Each cell accumulates the best path sum from the row above it.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
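# --- Added self-contained example (not part of the original file) ---
# The same dynamic programme on the classic 4-row example triangle, which
# needs no triangle.txt; the maximum path 3 -> 7 -> 4 -> 9 sums to 23.
if __name__ == "__main__":
    rows = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
    for i in range(1, len(rows)):
        for j in range(len(rows[i])):
            right = rows[i - 1][j] if j != len(rows[i - 1]) else 0
            left = rows[i - 1][j - 1] if j > 0 else 0
            rows[i][j] += max(left, right)
    assert max(rows[-1]) == 23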
# ----------------------------------------------------------------------
"""CLIP tokenizer wrapper that expands one placeholder token into several
learned sub-tokens (multi-vector textual inversion)."""
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
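# --- Added usage sketch (not part of the original file) ---
# Hedged example of how this tokenizer is typically used for multi-vector
# textual inversion; the checkpoint name is illustrative and downloading it
# requires network access.
if __name__ == "__main__":
    tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
    # "<cat-toy>" now expands to four sub-tokens before encoding:
    print(tokenizer.replace_placeholder_tokens_in_text("a photo of <cat-toy>"))
    # -> "a photo of <cat-toy>_0 <cat-toy>_1 <cat-toy>_2 <cat-toy>_3"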
# ----------------------------------------------------------------------
"""Evaluate a postfix (reverse Polish) expression, printing each stack step."""
import operator as op


def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
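# --- Added example (not part of the original file) ---
# "5 6 9 * +" evaluates as 5 + (6 * 9) = 59; `solve` also prints each stack step.
if __name__ == "__main__":
    assert solve(["5", "6", "9", "*", "+"]) == 59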
# ----------------------------------------------------------------------
"""Count the ways to make a given amount of pence from British coins
(Project Euler problem 31) with a classic coin-change dynamic programme."""


def solution(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
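# --- Added worked example (not part of the original file) ---
# For 5 pence the four ways are 1+1+1+1+1, 1+1+1+2, 1+2+2 and 5; because the
# outer loop runs over coins, each combination is counted once regardless of order.
if __name__ == "__main__":
    assert solution(5) == 4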
# ----------------------------------------------------------------------
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ----------------------------------------------------------------------
"""CPU-only smoke tests that launch accelerate test scripts in-process."""
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
# ----------------------------------------------------------------------
"""Automatic differentiation with dual numbers (forward mode).
https://en.wikipedia.org/wiki/Automatic_differentiation#Automatic_differentiation_using_dual_numbers
"""
from math import factorial


class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """Return the `order`-th derivative of `func` at `position`."""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
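# --- Added examples (not part of the original file) ---
# Forward-mode AD with dual numbers: the k-th dual coefficient (times k!)
# gives the k-th derivative. For f(x) = x**2 at x = 2: f'(2) = 4 and f''(2) = 2.
if __name__ == "__main__":
    assert differentiate(lambda x: x**2, 2, 1) == 4
    assert differentiate(lambda x: x**2, 2, 2) == 2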
# ----------------------------------------------------------------------
"""Fast and slow tests for the Stable Diffusion InstructPix2Pix pipeline."""
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")


@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
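# --- Added usage sketch (not part of the original test file) ---
# A hedged example of the pipeline these tests exercise, following the public
# diffusers API; kept in comments since this module uses relative test
# imports. Requires a GPU and network access to download the checkpoint:
#
#     import torch
#     from diffusers import StableDiffusionInstructPix2PixPipeline
#     from diffusers.utils import load_image
#
#     pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
#         "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
#     ).to("cuda")
#     image = load_image(
#         "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/"
#         "stable_diffusion_pix2pix/example.jpg"
#     )
#     edited = pipe("turn him into a cyborg", image=image, image_guidance_scale=1.0).images[0]
#     edited.save("cyborg.png")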
# ----------------------------------------------------------------------
"""Translation features for the `datasets` library."""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa

if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
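# --- Added usage note (not part of the original file) ---
# Hedged example of how `encode_example` flattens a variable-language
# translation dict; translations for the same language are sorted by text:
#
#     >>> feature = TranslationVariableLanguages(languages=["en", "fr"])
#     >>> feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#     {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}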
"""simple docstring"""
from math import pi, sqrt, tan
def __UpperCAmelCase ( __UpperCamelCase ):
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def __UpperCAmelCase ( __UpperCamelCase ):
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def __UpperCAmelCase ( __UpperCamelCase ):
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''' )
__lowercase : List[str] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
return 4 * pow(__UpperCamelCase , 2 ) * torus_radius * tube_radius
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def __UpperCAmelCase ( __UpperCamelCase ):
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''' )
__lowercase : int = (sidea + sidea + sidea) / 2
__lowercase : List[Any] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''' )
return 1 / 2 * (basea + basea) * height
def __UpperCAmelCase ( __UpperCamelCase ):
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''' )
return 1 / 2 * diagonal_a * diagonal_a
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or sides < 3:
raise ValueError(
'''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
elif length < 0:
raise ValueError(
'''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
return (sides * length**2) / (4 * tan(pi / sides ))
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F"Rectangle: {area_rectangle(1_0, 2_0) = }")
print(F"Square: {area_square(1_0) = }")
print(F"Triangle: {area_triangle(1_0, 1_0) = }")
print(F"Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }")
print(F"Parallelogram: {area_parallelogram(1_0, 2_0) = }")
print(F"Rhombus: {area_rhombus(1_0, 2_0) = }")
print(F"Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }")
print(F"Circle: {area_circle(2_0) = }")
print(F"Ellipse: {area_ellipse(1_0, 2_0) = }")
print('\nSurface Areas of various geometric shapes: \n')
print(F"Cube: {surface_area_cube(2_0) = }")
print(F"Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }")
print(F"Sphere: {surface_area_sphere(2_0) = }")
print(F"Hemisphere: {surface_area_hemisphere(2_0) = }")
print(F"Cone: {surface_area_cone(1_0, 2_0) = }")
print(F"Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }")
print(F"Cylinder: {surface_area_cylinder(1_0, 2_0) = }")
print(F"Torus: {surface_area_torus(2_0, 1_0) = }")
print(F"Equilateral Triangle: {area_reg_polygon(3, 1_0) = }")
print(F"Square: {area_reg_polygon(4, 1_0) = }")
print(F"Reqular Pentagon: {area_reg_polygon(5, 1_0) = }")
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: Tuple = logging.get_logger(__name__)
lowerCAmelCase_: str = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class a__ ( _a ):
snake_case_ = "transfo-xl"
snake_case_ = ["mems"]
snake_case_ = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, _UpperCAmelCase=26_7735, _UpperCAmelCase=[2_0000, 4_0000, 20_0000], _UpperCAmelCase=1024, _UpperCAmelCase=1024, _UpperCAmelCase=16, _UpperCAmelCase=64, _UpperCAmelCase=4096, _UpperCAmelCase=4, _UpperCAmelCase=False, _UpperCAmelCase=18, _UpperCAmelCase=1600, _UpperCAmelCase=1000, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=0, _UpperCAmelCase=-1, _UpperCAmelCase=True, _UpperCAmelCase=0.1, _UpperCAmelCase=0.0, _UpperCAmelCase=True, _UpperCAmelCase="normal", _UpperCAmelCase=0.01, _UpperCAmelCase=0.01, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-5, _UpperCAmelCase=0, **_UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = vocab_size
lowercase__ = []
self.cutoffs.extend(_UpperCAmelCase )
if proj_share_all_but_first:
lowercase__ = [False] + [True] * len(self.cutoffs )
else:
lowercase__ = [False] + [False] * len(self.cutoffs )
lowercase__ = d_model
lowercase__ = d_embed
lowercase__ = d_head
lowercase__ = d_inner
lowercase__ = div_val
lowercase__ = pre_lnorm
lowercase__ = n_layer
lowercase__ = n_head
lowercase__ = mem_len
lowercase__ = same_length
lowercase__ = attn_type
lowercase__ = clamp_len
lowercase__ = sample_softmax
lowercase__ = adaptive
lowercase__ = dropout
lowercase__ = dropatt
lowercase__ = untie_r
lowercase__ = init
lowercase__ = init_range
lowercase__ = proj_init_std
lowercase__ = init_std
lowercase__ = layer_norm_epsilon
super().__init__(eos_token_id=_UpperCAmelCase, **_UpperCAmelCase )
@property
def snake_case__ ( self ):
'''simple docstring'''
logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
raise NotImplementedError(
F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
"""simple docstring"""
from collections import deque
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = process_name # process name
lowercase__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
lowercase__ = arrival_time
lowercase__ = burst_time # remaining burst time
lowercase__ = 0 # total time of the process wait in ready queue
lowercase__ = 0 # time from arrival time to completion time
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = number_of_queues
# time slice of queues that round robin algorithm applied
lowercase__ = time_slices
# unfinished process is in this ready_queue
lowercase__ = queue
# current time
lowercase__ = current_time
# finished process is in this sequence queue
lowercase__ = deque()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return [q.burst_time for q in queue]
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of finished process
while len(_UpperCAmelCase ) != 0:
lowercase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_UpperCAmelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
lowercase__ = 0
# set the process's turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# set the completion time
lowercase__ = self.current_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
    def round_robin(self, ready_queue, time_slice):
        """Run one round robin cycle; unfinished processes go back to the queue."""
        finished = deque()  # sequence deque of finished processes
        # just one cycle: each process gets at most one time slice
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if the process arrives later than the current time, the CPU idles
            # until the arrival time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of the current process
            self.update_waiting_time(cp)
            # if the burst time of the process is bigger than the time slice
            if cp.burst_time > time_slice:
                # use the CPU for only one time slice
                self.current_time += time_slice
                # update the remaining burst time
                cp.burst_time -= time_slice
                # update the stop time
                cp.stop_time = self.current_time
                # put the process at the back of the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use the CPU for the remaining burst time
                self.current_time += cp.burst_time
                # set burst time to 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # set the process's turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to the finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished processes to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self):
        """Run round robin on every queue except the last, then FCFS on the last."""
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue uses the first come, first served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes (P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes (P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes (P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:\t{mlfq.calculate_sequence_of_finish_queue()}")
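# A hand-checkable sanity test for the scheduler above (a sketch; all processes
# arrive at t=0, queue 0 uses a 17-tick slice, queue 1 a 25-tick slice, and the
# last queue runs FCFS). The expected values were traced by hand for this
# workload: P2 finishes in queue 0, P4 in queue 1, and FCFS finishes P1 then P3.
def _example_mlfq():
    p1, p2, p3, p4 = Process("P1", 0, 53), Process("P2", 0, 17), Process("P3", 0, 68), Process("P4", 0, 24)
    mlfq = MLFQ(3, [17, 25], deque([p1, p2, p3, p4]), 0)
    mlfq.multi_level_feedback_queue()
    assert mlfq.calculate_sequence_of_finish_queue() == ["P2", "P4", "P1", "P3"]
    assert mlfq.calculate_waiting_time([p1, p2, p3, p4]) == [83, 17, 94, 101]
    assert mlfq.calculate_completion_time([p1, p2, p3, p4]) == [136, 34, 162, 125]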
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Create a list of PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
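# A minimal end-to-end sketch of the processor these tests exercise (not a unit
# test). The checkpoint name is an assumption: kakaobrain/align-base is the
# public ALIGN release, but nothing above depends on it.
def _demo_align_processor(image):
    processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
    inputs = processor(text="a photo of a cat", images=image, return_tensors="pt")
    # inputs now holds input_ids, token_type_ids, attention_mask and pixel_values
    return inputs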
"""simple docstring"""
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file in src_dir to dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
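# Example invocation (fire exposes minify's arguments both positionally and as
# flags; the file name below is illustrative):
#     python minify_dataset.py data/full data/mini 100
#     python minify_dataset.py --src_dir=data/full --dest_dir=data/mini --n=100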
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        field=None,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class JsonDatasetWriter:
    def __init__(
        self,
        dataset,
        path_or_buf,
        batch_size=None,
        num_proc=None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs
    def write(self):
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written
    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write(self, file_obj, orient, lines, index, **to_json_kwargs):
        """Write the dataset to a binary file handle, batch by batch."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
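# A minimal sketch of how these classes are reached through the public API;
# load_dataset("json", ...) constructs a JsonDatasetReader and Dataset.to_json
# constructs a JsonDatasetWriter. File names here are illustrative.
def _example_json_round_trip(path="train.jsonl", out="out.jsonl"):
    from datasets import load_dataset  # imported here to avoid a circular import

    ds = load_dataset("json", data_files=path, split="train")
    ds.to_json(out, lines=True)
    return ds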
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array, output_array) -> None:
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where the first argument is the
        # number of nodes in the previous layer and the second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent the number of nodes in
        # the input layer. The first hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # The first hidden layer has 4 nodes; the second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # The second hidden layer has 3 nodes; the output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # predicted_output initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer is the layer connecting
        # the input layer with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # the second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        """Update the weight matrices by gradient descent on the squared error."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )

        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value):
    """Logistic sigmoid activation."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value):
    """Derivative of the sigmoid, written in terms of the sigmoid's output."""
    return (value) * (1 - (value))
def example() -> int:
    """Train on the 3-bit odd-parity truth table and predict one input."""
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see the loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
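# The updates above are plain gradient descent on the squared error
# E = (y - y_hat)^2 with sigmoid activations; for the output layer weights the
# gradient is a2.T @ (2 * (y - y_hat) * sigmoid'(y_hat)), and the hidden-layer
# updates chain the same error term backwards. A quick smoke test (a sketch;
# with random weights and only 10 iterations the class of the answer may vary):
def _example_two_hidden_layer_network():
    prediction = example()
    assert prediction in (0, 1)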
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a Python list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        """Return the front element without removing it (False when empty)."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        """Insert an element at the rear; raise when the queue is full."""
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        """Remove and return the front element; raise on underflow."""
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
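# A short usage sketch of the queue above; enqueue returns self, so calls chain.
def _example_circular_queue():
    q = CircularQueue(3)
    q.enqueue(10).enqueue(20)
    assert len(q) == 2
    assert q.first() == 10
    assert q.dequeue() == 10
    assert q.dequeue() == 20
    assert q.is_empty()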
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download regularization images for class_prompt from the LAION kNN service."""
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fa2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fa3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate that the payload is an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fa2.write(images["url"] + "\n")
                    fa3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
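# Example invocation (prompt and paths are illustrative):
#     python retrieve.py --class_prompt "photo of a dog" \
#         --class_data_dir ./class_data --num_class_images 200
# The script keeps re-querying the LAION-400M kNN service with a growing
# num_images until it has at least 1.5x the requested candidates to download.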
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Try to find a path from the top-left cell to the bottom-right cell."""
    size = len(maze)
    # We need a solution object to save the path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """One depth-first backtracking step; marks visited cells in solutions."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and blocked points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark as visited
            solutions[i][j] = 1

            # check all four directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
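# A quick usage sketch (0 = open cell, 1 = blocked cell); solve_maze prints the
# visited-cell matrix and returns True when a path from (0, 0) to the bottom
# right corner exists.
def _example_maze():
    maze = [
        [0, 1, 0],
        [0, 0, 0],
        [1, 0, 0],
    ]
    return solve_maze(maze)  # True for this maze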
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()
def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
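# A small usage sketch; the expected values below were computed by hand for
# these toy arrays (3/4 correct, and for the positive class TP=2, FP=1, FN=0).
def _example_glue_metrics():
    import numpy as np

    preds = np.array([1, 0, 1, 1])
    labels = np.array([1, 0, 0, 1])
    # -> {"acc": 0.75, "f1": 0.8, "acc_and_f1": 0.775}
    return glue_compute_metrics("mrpc", preds, labels)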
'''simple docstring'''
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """Smallest index in v[l..r] whose value is >= key (binary search)."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Length of the longest strictly increasing subsequence, in O(n log n)."""
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value: it starts a fresh length-1 subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces its ceiling value to keep tails as small as possible
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
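# A minimal check of the function above. tail[i] stores the smallest possible
# tail value of an increasing subsequence of length i + 1, which is what keeps
# the algorithm O(n log n).
def _example_lis():
    # one longest subsequence is [2, 3, 7, 8, 10, 13]
    assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6
    assert longest_increasing_subsequence_length([]) == 0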
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )
    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        link_unchanged_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
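# For reference, the marker exercised by these tests looks like this in real
# model code, and utils/check_copies.py (run by `make fix-copies`) verifies
# that the marked body stays identical to the referenced source modulo the
# declared rename:
#     # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel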
"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, txt, txt_tok):
        embs = self.transformer(input_ids=txt, attention_mask=txt_tok)[0]
        # mean-pool the token embeddings under the attention mask
        embs2 = (embs * txt_tok.unsqueeze(2)).sum(dim=1) / txt_tok.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
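# forward() mean-pools the token embeddings under the attention mask before the
# linear projection, i.e. for mask m and embeddings e: pooled = sum(m * e) / sum(m),
# so padded positions contribute nothing to the sentence embedding.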
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class DPRConfig(PretrainedConfig):
    """Configuration for DPR encoders and readers (BERT-style backbone)."""

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
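# Usage sketch (a randomly initialised encoder; DPRQuestionEncoder is one of
# the public classes that consumes this config):
#     from transformers import DPRConfig, DPRQuestionEncoder
#     config = DPRConfig(projection_dim=128)
#     model = DPRQuestionEncoder(config)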
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
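# This module backs the `accelerate test` subcommand; the equivalent direct
# invocation is:
#     accelerate test --config_file path/to/default_config.yaml
# which launches test_script.py through accelerate-launch with that config.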
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place, locating each insertion point with binary search."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
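# A minimal check of the function above; binary search finds each insertion
# point in O(log i) comparisons, but element shifting keeps the algorithm
# O(n^2) in the worst case.
def _example_binary_insertion_sort():
    assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]
    assert binary_insertion_sort([]) == []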
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        # covered by check_over_configs / check_over_forward above
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # the scheduler passed in is used as-is; a default one is only built
        # when none is given
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        assert sample.dtype == torch.float16
| 354
| 1
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128_022
FR_CODE = 128_028
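# M2M100 prepends a language-code token to every sequence; EN_CODE and FR_CODE are the
# vocabulary ids of the English and French code tokens in the 418M checkpoint.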
@require_sentencepiece
class MaMaaaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip('''Skip this test while all models are still to be uploaded.''' )
def a__ ( self ) -> Union[str, Any]:
pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6],
        )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
@slow
def a__ ( self ) -> int:
# fmt: off
UpperCAmelCase_ : Any = {"""input_ids""": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ ,model_name='''facebook/m2m100_418M''' ,revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' ,)
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128_006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128_022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128_076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128_063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_lang_token_to_id)
@require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )
        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")
        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128_022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128_006,
            },
        )
| 30
|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        config.update(**kwargs)
        return config
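    # check_over_configs / check_over_forward (below) build a scheduler with one config tweak,
    # round-trip it through save_config/from_pretrained, and assert that the restored copy
    # steps to numerically identical outputs.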
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # build a default scheduler only when the caller did not pass one in
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1014) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 213
| 0
|
'''simple docstring'''
import datasets
from .evaluate import evaluate
__lowerCamelCase : Optional[Any] = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
__lowerCamelCase : Any = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
__lowerCamelCase : Union[str, Any] = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
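    # _compute converts the flat `references` format back into the nested SQuAD v1.1
    # dataset layout expected by the official scoring script, then delegates to it.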
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 713
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 459
| 0
|
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"""If set, {key} must be yes or no.""")
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
_run_remote_tests = parse_flag_from_env('RUN_REMOTE', default=False)
_run_local_tests = parse_flag_from_env('RUN_LOCAL', default=True)
_run_packaged_tests = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
    reason='test requires sndfile>=0.12.1: \'pip install "soundfile>=0.12.1"\'; ',
)
# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
    reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('0.3.2'),
    reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == 'win32',
    reason='test should not be run on Windows',
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
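# offline() simulates three failure modes: every request raises ConnectionError, every
# request times out (by pointing at a non-routable address), or datasets' own
# HF_DATASETS_OFFLINE flag is switched on.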
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout."""
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", F"""OfflineMock[{url}]"""),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}"""
        )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F"""'{cmd_str}' produced no output.""")
    return result
def pytest_xdist_worker_id():
    """Return the numeric id of the current pytest-xdist worker (0 for gw0, 1 for gw1, ...)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Derive a unique port per xdist worker so distributed tests don't collide."""
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 670
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f'''`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}''')
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split('''-''')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == '''gated'''
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\'''')
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = '''gelu_new'''
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs,
        )
| 224
| 0
|
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
UpperCamelCase__ = "scheduler_config.json"
class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
| 548
|
def least_divisible_repunit(divisor: int) -> int:
    """simple docstring"""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
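# Project Euler 129: find the smallest d for which A(d), the length of the shortest repunit
# (1, 11, 111, ...) divisible by d, exceeds the limit. A(d) is only defined when
# gcd(d, 10) == 1, so the search below walks odd, non-multiple-of-5 candidates upward.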
def solution(limit: int = 1_000_000) -> int:
    """simple docstring"""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f"""{solution() = }""")
| 548
| 1
|
"""simple docstring"""
def matching_min_vertex_cover(graph: dict) -> set:
    '''simple docstring'''
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    '''simple docstring'''
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 83
|
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
SCREAMING_SNAKE_CASE__ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
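# Maps the requested number of image embeddings to the (rows, cols) output grid of the
# adaptive average pool below, e.g. 4 -> (2, 2), which yields 4 pooled feature vectors.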
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
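# Pads each tokenized sentence in the batch up to the longest one and builds the matching
# attention mask; images, labels, and the image boundary tokens are simply stacked.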
def collate_fn(batch):
    '''simple docstring'''
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)
    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    '''simple docstring'''
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
| 267
| 0
|
"""simple docstring"""
import numpy as np
import datasets
__a : Any = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
__a : List[Any] = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
__a : Union[str, Any] = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ),
} ) , )
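    # _compute below returns the diagonal of (X - mu) . Sigma^-1 . (X - mu)^T, i.e. the
    # squared Mahalanobis distance of each row of X from the reference distribution's mean.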
    def _compute(self, X, reference_distribution):
        '''simple docstring'''
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)
        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError('''Expected `X` to be a 2D vector''')
        if len(reference_distribution.shape) != 2:
            raise ValueError('''Expected `reference_distribution` to be a 2D vector''')
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''')
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # fall back to the pseudo-inverse when the covariance matrix is singular
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
| 702
|
"""simple docstring"""
class Graph:
    """simple docstring"""
    def __init__(self):
        '''simple docstring'''
        self.vertex = {}

    def print_graph(self):
        '''simple docstring'''
        print(self.vertex)
        for i in self.vertex:
            print(i, ''' -> ''', ''' -> '''.join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        '''simple docstring'''
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        '''simple docstring'''
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        '''simple docstring'''
        visited[start_vertex] = True
        print(start_vertex, end=''' ''')
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
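# Hedged alternative sketch (added, not from the original file): the same
# traversal with an explicit stack instead of recursion, useful when the graph
# is deep enough to hit Python's recursion limit.
def dfs_iterative(graph: dict) -> list:
    visited, order = set(), []
    for start in graph:
        stack = [start]
        while stack:
            node = stack.pop()
            if node in visited:
                continue
            visited.add(node)
            order.append(node)
            # push neighbors in reverse so the leftmost neighbor is visited first
            stack.extend(reversed(graph.get(node, [])))
    return order


assert dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}) == [0, 1, 2, 3]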
| 200
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
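# Hedged illustration (added): with the lazy structure above, importing the
# package is cheap and the heavy submodules load only on first attribute
# access. The sketch below assumes an installed `transformers` package:
#
#     import transformers.models.whisper as whisper  # returns the _LazyModule
#     config_cls = whisper.WhisperConfig  # first access imports configuration_whisper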
| 105
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """simple docstring"""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    # Chudnovsky algorithm: roughly 14 digits of pi per series term
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
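# Hedged sanity check (added): the Chudnovsky series above should agree with
# math.pi on the leading digits.
if __name__ == "__main__":
    from math import pi as math_pi

    assert pi(10).startswith(str(math_pi)[:8])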
| 611
| 0
|
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'''
),
}
class XLMProphetNetConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout=0.1,
        activation_function="gelu",
        vocab_size=30522,
        hidden_size=1024,
        encoder_ffn_dim=4096,
        num_encoder_layers=12,
        num_encoder_attention_heads=16,
        decoder_ffn_dim=4096,
        num_decoder_layers=12,
        num_decoder_attention_heads=16,
        attention_dropout=0.1,
        dropout=0.1,
        max_position_embeddings=512,
        init_std=0.02,
        is_encoder_decoder=True,
        add_cross_attention=True,
        decoder_start_token_id=0,
        ngram=2,
        num_buckets=32,
        relative_max_distance=128,
        disable_ngram_loss=False,
        eps=0.0,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
| 48
|
"""simple docstring"""
def binary_insertion_sort(collection: list) -> list:
    """simple docstring"""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        # binary search for the insertion point of `val` in collection[:i]
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
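# Hedged check (added): the binary-search insertion sort above must agree with
# Python's built-in sort, including on duplicates and an empty list.
assert binary_insertion_sort([5, 2, 4, 2, 1]) == sorted([5, 2, 4, 2, 1])
assert binary_insertion_sort([]) == []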
| 48
| 1
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            """simple docstring"""
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            """simple docstring"""
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            """simple docstring"""
            return self.features[i]

        def get_labels(self):
            """simple docstring"""
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            """simple docstring"""
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            """simple docstring"""
            return self.dataset

        def __len__(self):
            """simple docstring"""
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            """simple docstring"""
            return self.features[i]

        def get_labels(self):
            """simple docstring"""
            return self.label_list
class HansProcessor(DataProcessor):
    def get_train_examples(self, data_dir):
        """simple docstring"""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """simple docstring"""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """simple docstring"""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """simple docstring"""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer) -> List[InputFeatures]:
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
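# Hedged usage sketch (added): feeding HansProcessor a couple of fabricated
# TSV-style rows (the header row is skipped) to show how _create_examples maps
# columns to InputExample fields — label in column 0, premise in 5, hypothesis
# in 6, pair id in 7, matching the slicing above.
if __name__ == "__main__":
    fake_lines = [
        ["gold_label", "x", "x", "x", "x", "sentence1", "sentence2", "pairID"],
        ["entailment", "x", "x", "x", "x", "The doctor ran.", "The doctor moved.", "ex0"],
    ]
    examples = HansProcessor()._create_examples(fake_lines, "dev")
    assert examples[0].guid == "dev-entailment" and examples[0].pairID == "0"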
| 495
|
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )
    def __post_init__(self):
        """simple docstring"""
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """simple docstring"""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        """simple docstring"""
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        """simple docstring"""
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
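# Hedged usage sketch (added): these dataclass fields are meant to be parsed
# from the command line with transformers' HfArgumentParser. Running this file
# directly fails at the relative import; treat it as a paste-in snippet.
if __name__ == "__main__":
    from transformers import HfArgumentParser

    parser = HfArgumentParser(BenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses(args=["--models", "bert-base-cased"])[0]
    print(benchmark_args.model_names)  # ['bert-base-cased']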
| 495
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 715
|
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
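# Hedged sketch (added): a minimal encode/decode round trip with a randomly
# initialized tiny VAE, mirroring what the slow tests above exercise against
# real checkpoints. The config matches prepare_init_args_and_inputs_for_common.
if __name__ == "__main__":
    tiny_vae = AutoencoderKL(
        block_out_channels=[32, 64],
        in_channels=3,
        out_channels=3,
        down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
        up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
        latent_channels=4,
    )
    pixels = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        latents = tiny_vae.encode(pixels).latent_dist.sample()
        decoded = tiny_vae.decode(latents).sample
    assert decoded.shape == pixels.shape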
| 412
| 0
|
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    """simple docstring"""

    model_input_names = ["input_values", "attention_mask"]
    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
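# Hedged usage sketch (added): running the extractor on one second of random
# mono audio; `audio` yields padded waveforms and `audio_target` yields log-mel
# spectrogram targets. Running this file directly fails at the relative
# imports, so treat it as a paste-in snippet.
if __name__ == "__main__":
    extractor = SpeechT5FeatureExtractor()
    waveform = np.random.randn(16000).astype(np.float32)
    inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
    targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")
    print(inputs["input_values"].shape)   # (1, 16000)
    print(targets["input_values"].shape)  # (1, num_frames, 80) log-mel features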
| 39
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """simple docstring"""

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    """simple docstring"""

    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    """simple docstring"""

    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        '''simple docstring'''
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output_pre = outputs["hidden_states"][-2]
            sequence_output_pre = self.pre_LN(sequence_output_pre)
            projection_state = self.transformation_pre(sequence_output_pre)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
| 189
| 0
|
"""simple docstring"""
def solution(length: int = 50) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
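# Hedged check (added): for a row of length 3 the tilings are [1,1,1], [2,1],
# [1,2] and [3], so the count must be 4.
assert solution(3) == 4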
| 719
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    """simple docstring"""
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    """simple docstring"""
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
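# The slicing above just splits a fused (3 * hidden, hidden) QKV projection
# into three (hidden, hidden) blocks. A minimal sketch of the same arithmetic:
#
#   import torch
#   hidden = 4
#   qkv = torch.arange(3 * hidden * hidden).reshape(3 * hidden, hidden)
#   q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]
#   assert q.shape == k.shape == v.shape == (hidden, hidden)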
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
"""
Project Euler Problem 12: https://projecteuler.net/problem=12

Highly divisible triangular number: what is the value of the first triangle
number to have over five hundred divisors?
"""


def triangle_number_generator():
    """Yields the triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n):
    """Counts the divisors of n via its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    """Returns the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
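# A small check of count_divisors: 28 = 2**2 * 7 has (2 + 1) * (1 + 1) = 6
# divisors (1, 2, 4, 7, 14, 28), so count_divisors(28) == 6.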
"""Fast tokenization class for the FNet model."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" FNet tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
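# Sketch of the special-token layout the two methods above produce (hypothetical
# token ids, no checkpoint download needed): for a pair of sequences [5, 6] and
# [7], the tokenizer builds
#
#   input_ids      = [CLS] 5 6 [SEP] 7 [SEP]
#   token_type_ids = [0, 0, 0, 0, 1, 1]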
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101_100_111_110_110_010_010_000_011_110_111_011_000_110_011_110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, "dwtDct") for image in images]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
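# Minimal usage sketch (assumes the optional `invisible-watermark` package is
# installed; input values are assumed to be NCHW tensors in [-1, 1]):
#
#   watermarker = StableDiffusionXLWatermarker()
#   images = torch.zeros(1, 3, 512, 512)
#   marked = watermarker.apply_watermark(images)
#   assert marked.shape == images.shape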
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : int = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
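# How the lazy init behaves (a sketch): nothing under `modeling_plbart` is
# imported until an attribute is actually touched, e.g.
#
#   from transformers.models import plbart
#   model_cls = plbart.PLBartForConditionalGeneration  # triggers the real import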
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True

            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
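# The multi-line branch above matches `getattr` calls that a formatter has
# wrapped over several lines, e.g. (illustrative):
#
#   import re
#   src = 'getattr(\n    self.config,\n    "hidden_size",\n)'
#   pattern = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"'
#   assert re.search(pattern, src) is not None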
def check_config_attributes_being_used(config_class):
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
    model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
    flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
    optimizer.load_state_dict(flattened_osd)
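# Typical call sites (a sketch; assumes an FSDP-wrapped `model`, its `optimizer`,
# and the surrounding `accelerator` / `fsdp_plugin` objects):
#
#   save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt", model_index=0)
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt", optimizer_index=0)
#   load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")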
"""
Project Euler Problem 29: https://projecteuler.net/problem=29

How many distinct terms are in the sequence generated by a**b for
2 <= a <= 100 and 2 <= b <= 100?
"""


def solution(n=100):
    """Returns the number of distinct terms a**b for 2 <= a, b <= n."""
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)


if __name__ == "__main__":
    print("Number of terms ", solution(int(str(input()).strip())))
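# Worked example: for n = 5 the distinct powers a**b with 2 <= a, b <= 5 are
# {4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125}; the only
# collision is 2**4 == 4**2 == 16, so solution(5) == 15.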
"""
Project Euler Problem 3: https://projecteuler.net/problem=3

What is the largest prime factor of the number 600851475143?
"""


def solution(n=600851475143):
    """Returns the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)


if __name__ == "__main__":
    print(f"{solution() = }")
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch
        return decrypted


def main():
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
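# Non-interactive usage sketch (the 2x2 key below has determinant 7, which is
# coprime with 36, so it passes check_determinant):
#
#   cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
#   secret = cipher.encrypt("hello")
#   assert cipher.decrypt(secret).startswith("HELLO")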
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.01",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
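# Usage sketch from a notebook cell (assumes a `training_function` whose
# positional arguments are all supplied via `args`):
#
#   notebook_launcher(training_function, args=(config,), num_processes=2)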
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)


@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
"""Convert a fairseq Speech2Text checkpoint to the Transformers format."""
import argparse

import torch
from torch import nn

from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
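# Example invocation (a sketch; the script name and paths are placeholders):
#
#   python convert_s2t_checkpoint.py \
#       --fairseq_path /path/to/s2t_checkpoint.pt \
#       --pytorch_dump_folder_path ./s2t-converted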
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)

enable_full_determinism()


class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict


class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
model_normal_load.to(snake_case )
model_normal_load.eval()
a__ : Any = model_normal_load(snake_case , snake_case )["sample"]
assert torch_all_close(snake_case , snake_case , rtol=1E-3 )
def _snake_case ( self ) -> Optional[int]:
"""simple docstring"""
a__ : str = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" )
model.eval()
model.to(snake_case )
a__ : List[str] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
a__ : Any = noise.to(snake_case )
a__ : Optional[Any] = torch.tensor([10] * noise.shape[0] ).to(snake_case )
with torch.no_grad():
a__ : Dict = model(snake_case , snake_case ).sample
a__ : str = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
a__ : Optional[Any] = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
# fmt: on
self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-3 ) )
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
_UpperCamelCase : Dict = UNetaDModel
_UpperCamelCase : List[Any] = """sample"""
@property
def _snake_case ( self , snake_case=(32, 32) ) -> List[Any]:
"""simple docstring"""
a__ : Union[str, Any] = 4
a__ : Optional[Any] = 3
a__ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case )
a__ : Any = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=snake_case )
return {"sample": noise, "timestep": time_step}
@property
def _snake_case ( self ) -> Dict:
"""simple docstring"""
return (3, 32, 32)
@property
def _snake_case ( self ) -> Union[str, Any]:
"""simple docstring"""
return (3, 32, 32)
def _snake_case ( self ) -> int:
"""simple docstring"""
a__ : Tuple = {
"block_out_channels": [32, 64, 64, 64],
"in_channels": 3,
"layers_per_block": 1,
"out_channels": 3,
"time_embedding_type": "fourier",
"norm_eps": 1E-6,
"mid_block_scale_factor": math.sqrt(2.0 ),
"norm_num_groups": None,
"down_block_types": [
"SkipDownBlock2D",
"AttnSkipDownBlock2D",
"SkipDownBlock2D",
"SkipDownBlock2D",
],
"up_block_types": [
"SkipUpBlock2D",
"SkipUpBlock2D",
"AttnSkipUpBlock2D",
"SkipUpBlock2D",
],
}
a__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
@slow
def _snake_case ( self ) -> str:
"""simple docstring"""
a__ , a__ : Any = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" , output_loading_info=snake_case )
self.assertIsNotNone(snake_case )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(snake_case )
a__ : int = self.dummy_input
a__ : Union[str, Any] = floats_tensor((4, 3) + (256, 256) ).to(snake_case )
a__ : List[str] = noise
a__ : Any = model(**snake_case )
assert image is not None, "Make sure output is not None"
@slow
def _snake_case ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : Dict = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" )
model.to(snake_case )
a__ : Optional[Any] = 4
a__ : Any = 3
a__ : Union[str, Any] = (256, 256)
a__ : Union[str, Any] = torch.ones((batch_size, num_channels) + sizes ).to(snake_case )
a__ : str = torch.tensor(batch_size * [1E-4] ).to(snake_case )
with torch.no_grad():
a__ : Optional[int] = model(snake_case , snake_case ).sample
a__ : Tuple = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
a__ : Dict = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] )
# fmt: on
self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-2 ) )
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
a__ : List[Any] = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update" )
model.to(snake_case )
a__ : List[Any] = 4
a__ : Union[str, Any] = 3
a__ : str = (32, 32)
a__ : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(snake_case )
a__ : str = torch.tensor(batch_size * [1E-4] ).to(snake_case )
with torch.no_grad():
a__ : List[Any] = model(snake_case , snake_case ).sample
a__ : int = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
a__ : Tuple = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
# fmt: on
self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-2 ) )
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
pass
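

# Illustrative sketch (not part of the test suite): the first dummy config
# above can be instantiated directly to run a single denoising forward pass.
def _example_unet2d_forward():
    import torch
    from diffusers import UNet2DModel

    model = UNet2DModel(
        sample_size=32,
        in_channels=3,
        out_channels=3,
        layers_per_block=2,
        block_out_channels=(32, 64),
        attention_head_dim=3,
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    sample = torch.randn(4, 3, 32, 32)
    timestep = torch.tensor([10])
    out = model(sample, timestep).sample
    assert out.shape == sample.shape  # the UNet is shape-preserving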
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image tensor with values in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image or a batch of images to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
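

# Illustrative usage sketch (not part of the original helpers): convert a fake
# batch of model outputs in [-1, 1] into PIL images.
def _example_pt_to_pil():
    import torch

    fake_batch = torch.rand(2, 3, 64, 64) * 2 - 1  # values in [-1, 1]
    pil_images = pt_to_pil(fake_batch)
    assert len(pil_images) == 2 and pil_images[0].size == (64, 64)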
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                " It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f" but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f" = {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
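

# Illustrative sketch (not part of the original module): the default config can
# be instantiated directly; `inputs_to_logits_ratio` is the product of the conv
# strides, i.e. the total downsampling factor of the feature encoder.
def _example_sewd_config():
    config = SEWDConfig()
    print(config.inputs_to_logits_ratio)  # 5 * 2**6 = 320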
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """A CLIPTokenizer that maps a single placeholder token to several sub-tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
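

# Illustrative usage sketch (not part of the original class): register a
# placeholder made of several sub-tokens, then encode a prompt that uses it.
# The checkpoint name is the standard CLIP tokenizer and is an assumption here.
def _example_multi_token_tokenizer():
    tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
    tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
    # "<cat-toy>" is expanded to "<cat-toy>_0 ... <cat-toy>_3" before tokenizing
    ids = tokenizer.encode("a photo of <cat-toy>")
    print(ids)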
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['small', 'medium', 'large']

OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f'{MODEL}_ft.pkl')
        pytorch_dump_folder_path = f'./DialoGPT-{MODEL}'
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
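
# Illustrative sketch (not part of the original script): a converted folder can
# be loaded as a regular GPT-2 checkpoint once a matching config file is added,
# e.g. by copying the config of `microsoft/DialoGPT-small`. That extra step is
# an assumption about how the dump is meant to be consumed:
#
#   from transformers import GPT2LMHeadModel
#   model = GPT2LMHeadModel.from_pretrained("./DialoGPT-small")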
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
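

# Illustrative sketch (not part of the original module): the deprecation
# warning can be observed directly when instantiating the shim class.
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       _ = DeiTFeatureExtractor()
#   assert any("DeiTImageProcessor" in str(w.message) for w in caught)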
"""simple docstring"""
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'kernels/rwkv/wkv_cuda.cu',
'kernels/rwkv/wkv_op.cpp',
'kernels/deformable_detr/ms_deform_attn.h',
'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
'models/graphormer/algos_graphormer.pyx',
]
def test_custom_files_are_present(transformers_path) -> bool:
    """Checks whether the copied extension files are present in the package."""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module('transformers')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / 'build/lib/transformers'
    if not test_custom_files_are_present(transformers_path):
        raise ValueError('The built release does not contain the custom files. Fix this before going further!')
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
def __init__( self : List[Any] , *snake_case__ : Dict , **snake_case__ : List[str] ):
super().__init__(*snake_case__ , **snake_case__ )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
def __call__( self : Any , snake_case__ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **snake_case__ : Optional[Any] ):
return super().__call__(snake_case__ , **snake_case__ )
def UpperCAmelCase__ ( self : Tuple , snake_case__ : str , snake_case__ : Dict=None ):
lowerCamelCase_ : Any =load_image(snake_case__ )
if prompt is not None:
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError(
F"""Received an invalid text input, got - {type(snake_case__ )} - but expected a single string. """
"Note also that one single text can be provided for conditional image to text generation." )
lowerCamelCase_ : Optional[int] =self.model.config.model_type
if model_type == "git":
lowerCamelCase_ : Optional[int] =self.image_processor(images=snake_case__ , return_tensors=self.framework )
lowerCamelCase_ : Union[str, Any] =self.tokenizer(text=snake_case__ , add_special_tokens=snake_case__ ).input_ids
lowerCamelCase_ : str =[self.tokenizer.cls_token_id] + input_ids
lowerCamelCase_ : Optional[Any] =torch.tensor(snake_case__ ).unsqueeze(0 )
model_inputs.update({"input_ids": input_ids} )
elif model_type == "pix2struct":
lowerCamelCase_ : Union[str, Any] =self.image_processor(images=snake_case__ , header_text=snake_case__ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowerCamelCase_ : Union[str, Any] =self.image_processor(images=snake_case__ , return_tensors=self.framework )
lowerCamelCase_ : Dict =self.tokenizer(snake_case__ , return_tensors=self.framework )
model_inputs.update(snake_case__ )
else:
raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
else:
lowerCamelCase_ : Optional[int] =self.image_processor(images=snake_case__ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowerCamelCase_ : Union[str, Any] =None
return model_inputs
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Any , snake_case__ : Dict=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["input_ids"] , snake_case__ )
and all(x is None for x in model_inputs["input_ids"] )
):
lowerCamelCase_ : Tuple =None
if generate_kwargs is None:
lowerCamelCase_ : List[Any] ={}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowerCamelCase_ : str =model_inputs.pop(self.model.main_input_name )
lowerCamelCase_ : List[Any] =self.model.generate(snake_case__ , **snake_case__ , **snake_case__ )
return model_outputs
def UpperCAmelCase__ ( self : str , snake_case__ : Any ):
lowerCamelCase_ : Optional[Any] =[]
for output_ids in model_outputs:
lowerCamelCase_ : Tuple ={
"generated_text": self.tokenizer.decode(
snake_case__ , skip_special_tokens=snake_case__ , )
}
records.append(snake_case__ )
return records
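

# Illustrative usage sketch (not part of the original module): the pipeline is
# normally built through the `pipeline` factory; the checkpoint name and image
# URL below are examples, not requirements of this class.
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))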
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics."""
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])

    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')

    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])

    # print & log results
    result_str = f'WER: {wer_result}\nCER: {cer_result}'
    print(result_str)

    with open(f'{dataset_id}_eval_results.txt', 'w') as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f'log_{dataset_id}_predictions.txt'
        target_file = f'log_{dataset_id}_targets.txt'

        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f'{i}' + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(f'{i}' + '\n')
                t.write(batch['target'] + '\n')

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """DO NOT CHANGE. This function normalizes the target text."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, '', text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `\'en\'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `\'test\'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
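
# Illustrative invocation sketch (the model/dataset identifiers below are
# examples, not part of the original script):
#
#   python eval.py \
#       --model_id hf-test/xls-r-dummy \
#       --dataset mozilla-foundation/common_voice_7_0 --config ab --split test \
#       --log_outputs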
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
def __init__( self , snake_case_ , snake_case_=100 , snake_case_=13 , snake_case_=30 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=10 , snake_case_=0.02 , snake_case_=3 , ) -> Optional[int]:
UpperCamelCase__ = parent
UpperCamelCase__ = vocab_size
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase__ = (image_size // patch_size) ** 2
UpperCamelCase__ = num_patches + 1
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = FlaxBeitModel(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = FlaxBeitForMaskedImageModeling(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.type_sequence_label_size
UpperCamelCase__ = FlaxBeitForImageClassification(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__ = 1
UpperCamelCase__ = FlaxBeitForImageClassification(snake_case_ )
UpperCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ = model(snake_case_ )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
a : int =(
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def SCREAMING_SNAKE_CASE__ ( self ) -> None:
UpperCamelCase__ = FlaxBeitModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
@jax.jit
def model_jitted(snake_case_ , **snake_case_ ):
return model(pixel_values=snake_case_ , **snake_case_ )
with self.subTest('JIT Enabled' ):
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for jitted_output, output in zip(snake_case_ , snake_case_ ):
self.assertEqual(jitted_output.shape , output.shape )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
UpperCamelCase__ = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase__ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(snake_case_ )
def lowerCAmelCase_( ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' ).pixel_values
# prepare bool_masked_pos
UpperCamelCase__ = np.ones((1, 196) , dtype=snake_case_ )
# forward pass
UpperCamelCase__ = model(pixel_values=snake_case_ , bool_masked_pos=snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 196, 8192)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , snake_case_ , atol=1E-2 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 1000)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 281
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 2_1841)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 2396
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope="""session""" )
def lowercase ( __A : List[str] ) -> int:
'''simple docstring'''
snake_case : Dict = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
snake_case : Optional[int] = bytes(__A , """utf-8""" )
with zstd.open(__A , """wb""" ) as f:
f.write(__A )
return path
@pytest.fixture
def lowercase ( __A : Dict ) -> int:
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , __A ) , """w""" ) as f:
f.write(__A )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def lowercase ( __A : Tuple , __A : Dict , __A : Optional[int] , __A : Dict , __A : List[str] , __A : int ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
snake_case : List[str] = input_paths[compression_format]
snake_case : str = tmp_path / """cache"""
snake_case : Dict = DownloadConfig(cache_dir=__A , extract_compressed_file=__A )
snake_case : Optional[int] = cached_path(__A , download_config=__A )
with open(__A ) as f:
snake_case : Optional[Any] = f.read()
with open(__A ) as f:
snake_case : List[Any] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def lowercase ( __A : List[str] , __A : Tuple , __A : int , __A : Dict , __A : str ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[Any] = """custom_cache"""
snake_case : List[str] = """custom_extracted_dir"""
snake_case : List[Any] = tmp_path / """custom_extracted_path"""
if default_extracted:
snake_case : int = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , __A )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(__A ) )
snake_case : Union[str, Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
snake_case : Optional[Any] = xz_file
snake_case : Union[str, Any] = (
DownloadConfig(extract_compressed_file=__A )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__A )
)
snake_case : str = cached_path(__A , download_config=__A )
assert Path(__A ).parent.parts[-2:] == expected
def lowercase ( __A : str ) -> str:
'''simple docstring'''
snake_case : List[str] = str(Path(__A ).resolve() )
assert cached_path(__A ) == text_file
# relative path
snake_case : Dict = str(Path(__A ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(__A ) == text_file
def lowercase ( __A : Dict ) -> Tuple:
'''simple docstring'''
snake_case : Union[str, Any] = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(__A ):
cached_path(__A )
# relative path
snake_case : Tuple = """./__missing_file__.txt"""
with pytest.raises(__A ):
cached_path(__A )
def lowercase ( __A : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : str = get_from_cache(f"""tmp://{tmpfs_file}""" )
with open(__A ) as f:
snake_case : List[str] = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , __A )
def lowercase ( ) -> List[str]:
'''simple docstring'''
with pytest.raises(__A ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , __A )
def lowercase ( __A : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(__A ):
http_get("""https://huggingface.co""" , temp_file=__A )
with pytest.raises(__A ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , __A )
def lowercase ( __A : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(__A ):
ftp_get("""ftp://huggingface.co""" , temp_file=__A )
with pytest.raises(__A ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , __A )
def lowercase ( __A : Optional[int] ) -> str:
'''simple docstring'''
snake_case : int = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(__A ):
fsspec_get("""s3://huggingface.co""" , temp_file=__A )
with pytest.raises(__A ):
fsspec_head("""s3://huggingface.co""" )
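

# Illustrative sketch (not part of the test module): for a local file that
# already exists, `cached_path` is a pass-through; remote URLs are downloaded
# and cached instead.
def _example_cached_path(text_file: str):
    # absolute local paths resolve to themselves
    resolved = str(Path(text_file).resolve())
    assert cached_path(resolved) == resolved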
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    """Wrapper class for timm models to be used as backbones in the library."""

    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
def __init__( self , lowercase , **lowercase) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , 'timm')
super().__init__(lowercase)
a__ : List[str] = config
if config.backbone is None:
raise ValueError('backbone is not set in the config. Please set it to a timm model name.')
if config.backbone not in timm.list_models():
raise ValueError(F'backbone {config.backbone} is not supported by timm.')
if hasattr(lowercase , 'out_features') and config.out_features is not None:
raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.')
a__ : List[Any] = getattr(lowercase , 'use_pretrained_backbone' , lowercase)
if pretrained is None:
raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.')
# We just take the final layer by default. This matches the default for the transformers models.
a__ : Optional[Any] = config.out_indices if getattr(lowercase , 'out_indices' , lowercase) is not None else (-1,)
a__ : Dict = timm.create_model(
config.backbone , pretrained=lowercase , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowercase , **lowercase , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
a__ : Tuple = self._backbone.return_layers
a__ : int = {layer['module']: str(lowercase) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(lowercase)
@classmethod
def __lowercase ( cls , lowercase , *lowercase , **lowercase) -> Dict:
'''simple docstring'''
requires_backends(cls , ['vision', 'timm'])
from ...models.timm_backbone import TimmBackboneConfig
a__ : List[Any] = kwargs.pop('config' , TimmBackboneConfig())
a__ : Any = kwargs.pop('use_timm_backbone' , lowercase)
if not use_timm:
raise ValueError('use_timm_backbone must be True for timm backbones')
a__ : List[str] = kwargs.pop('num_channels' , config.num_channels)
a__ : str = kwargs.pop('features_only' , config.features_only)
a__ : List[str] = kwargs.pop('use_pretrained_backbone' , config.use_pretrained_backbone)
a__ : int = kwargs.pop('out_indices' , config.out_indices)
a__ : Union[str, Any] = TimmBackboneConfig(
backbone=lowercase , num_channels=lowercase , features_only=lowercase , use_pretrained_backbone=lowercase , out_indices=lowercase , )
return super()._from_config(lowercase , **lowercase)
def __lowercase ( self , lowercase) -> Union[str, Any]:
'''simple docstring'''
pass
def __lowercase ( self , lowercase , lowercase=None , lowercase=None , lowercase=None , **lowercase) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
a__ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
a__ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a__ : Optional[Any] = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('Cannot output attentions for timm backbones at the moment')
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
a__ : Optional[int] = self._all_layers
a__ : int = self._backbone(lowercase , **lowercase)
a__ : List[Any] = self._return_layers
a__ : List[Any] = tuple(hidden_states[i] for i in self.out_indices)
else:
a__ : str = self._backbone(lowercase , **lowercase)
a__ : List[str] = None
a__ : Optional[Any] = tuple(lowercase)
a__ : Optional[int] = tuple(lowercase) if hidden_states is not None else None
if not return_dict:
a__ : Dict = (feature_maps,)
if output_hidden_states:
a__ : List[str] = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowercase , hidden_states=lowercase , attentions=lowercase)
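

# Illustrative usage sketch (not part of the original module): build a backbone
# around a timm ResNet and inspect the returned feature maps. The timm model
# name is an example; `timm` must be installed.
def _example_timm_backbone():
    import torch

    config = TimmBackboneConfig(backbone='resnet18', use_pretrained_backbone=False)
    backbone = TimmBackbone(config)
    outputs = backbone(torch.randn(1, 3, 224, 224))
    for fmap in outputs.feature_maps:
        print(fmap.shape)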
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any], lowerCamelCase : int, lowerCamelCase : Union[str, Any]=13, lowerCamelCase : Dict=32, lowerCamelCase : Tuple=3, lowerCamelCase : List[str]=4, lowerCamelCase : int=[10, 20, 30, 40], lowerCamelCase : int=[2, 2, 3, 2], lowerCamelCase : List[str]=True, lowerCamelCase : int=True, lowerCamelCase : List[Any]=37, lowerCamelCase : Optional[int]="gelu", lowerCamelCase : Optional[Any]=10, lowerCamelCase : Optional[Any]=0.02, lowerCamelCase : Union[str, Any]=["stage2", "stage3", "stage4"], lowerCamelCase : int=[2, 3, 4], lowerCamelCase : Optional[Any]=None, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = num_channels
lowercase__ = num_stages
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = num_labels
lowercase__ = initializer_range
lowercase__ = out_features
lowercase__ = out_indices
lowercase__ = scope
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.num_labels )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=_a, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def lowercase__ ( self : List[Any], lowerCamelCase : List[str], lowerCamelCase : str, lowerCamelCase : Tuple ):
'''simple docstring'''
lowercase__ = ConvNextModel(config=_a )
model.to(_a )
model.eval()
lowercase__ = model(_a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def lowercase__ ( self : int, lowerCamelCase : int, lowerCamelCase : Any, lowerCamelCase : Tuple ):
'''simple docstring'''
lowercase__ = ConvNextForImageClassification(_a )
model.to(_a )
model.eval()
lowercase__ = model(_a, labels=_a )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase__ ( self : List[Any], lowerCamelCase : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
lowercase__ = model(_a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase__ = None
lowercase__ = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
lowercase__ = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( UpperCamelCase__ ,UpperCamelCase__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
lowercase__ = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = True
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = ConvNextModelTester(self )
lowercase__ = ConfigTester(self, config_class=_a, has_text_modality=_a, hidden_size=37 )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : List[str] ):
'''simple docstring'''
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def lowercase__ ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def lowercase__ ( self : Any ):
'''simple docstring'''
pass
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(_a )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], _a )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def lowercase__ ( self : Dict ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase : int, lowerCamelCase : List[Any], lowerCamelCase : int ):
lowercase__ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(_a, _a ) )
lowercase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ = self.model_tester.num_stages
self.assertEqual(len(_a ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = True
check_hidden_states_output(_a, _a, _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(_a, _a, _a )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def lowercase__ ( self : List[str] ):
'''simple docstring'''
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = ConvNextModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def a ( ):
'''simple docstring'''
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self : str ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(_a )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=_a, return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
lowercase__ = model(**_a )
# verify the logits
lowercase__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, _a )
lowercase__ = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3], _a, atol=1E-4 ) )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ,UpperCamelCase__ ):
"""simple docstring"""
lowercase__ = (ConvNextBackbone,) if is_torch_available() else ()
lowercase__ = ConvNextConfig
lowercase__ = False
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = ConvNextModelTester(self )
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    """Feature extractor for CLAP audio inputs."""

    model_input_names = ["input_features", "is_longer"]
def __init__(self, feature_size: int = 64, sampling_rate: int = 48_000, hop_length: int = 480, max_length_s: int = 10, fft_window_size: int = 1_024, padding_value: float = 0.0, return_attention_mask: bool = False, frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None, truncation: str = 'fusion', padding: str = 'repeatpad', **kwargs, ):
    '''simple docstring'''
    super().__init__(
        feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs, )
    self.top_db = top_db
    self.truncation = truncation
    self.padding = padding
    self.fft_window_size = fft_window_size
    self.nb_frequency_bins = (fft_window_size >> 1) + 1
    self.hop_length = hop_length
    self.max_length_s = max_length_s
    self.nb_max_samples = max_length_s * sampling_rate
    self.sampling_rate = sampling_rate
    self.frequency_min = frequency_min
    self.frequency_max = frequency_max
    self.mel_filters = mel_filter_bank(
        num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale='htk', )
    self.mel_filters_slaney = mel_filter_bank(
        num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney', )
def to_dict(self):
    '''simple docstring'''
    output = copy.deepcopy(self.__dict__)
    output['feature_extractor_type'] = self.__class__.__name__
    if 'mel_filters' in output:
        del output['mel_filters']
    if 'mel_filters_slaney' in output:
        del output['mel_filters_slaney']
    return output
def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
    '''simple docstring'''
    log_mel_spectrogram = spectrogram(
        waveform, window_function(self.fft_window_size, 'hann'), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel='dB', )
    return log_mel_spectrogram.T
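# A hedged sketch of how the two filter banks built in __init__ pair with the
# helper above ('extractor' and 'wav' are illustrative names, not part of this
# file): the HTK-scale bank feeds the "fusion" truncation path, the
# Slaney-scale bank feeds the plain path.
#
#   fused_mel = extractor._np_extract_fbank_features(wav, extractor.mel_filters)
#   plain_mel = extractor._np_extract_fbank_features(wav, extractor.mel_filters_slaney)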
def _random_mel_fusion(self, mel, total_frames, chunk_frames):
    '''simple docstring'''
    ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
    if len(ranges[1]) == 0:
        # if the audio is too short, we just use the first chunk
        ranges[1] = [0]
    if len(ranges[2]) == 0:
        # if the audio is too short, we just use the first chunk
        ranges[2] = [0]
    # randomly choose index for each part
    idx_front = np.random.choice(ranges[0])
    idx_middle = np.random.choice(ranges[1])
    idx_back = np.random.choice(ranges[2])
    mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
    mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
    mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
    mel = torch.tensor(mel[None, None, :])
    mel_shrink = torch.nn.functional.interpolate(
        mel, size=[chunk_frames, 64], mode='bilinear', align_corners=False)
    mel_shrink = mel_shrink[0][0].numpy()
    mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
    return mel_fusion
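# Why four channels: the fusion tensor stacks one globally downsampled view of
# the whole spectrogram with three randomly placed full-resolution crops (one
# from each third of the audio), so downstream models see both global context
# and local detail for clips longer than max_length.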
def _get_input_mel(self, waveform: np.array, max_length, truncation, padding):
    '''simple docstring'''
    if waveform.shape[0] > max_length:
        if truncation == "rand_trunc":
            longer = True
            # random crop to max_length (for compatibility) -> this should be handled by self.pad
            overflow = len(waveform) - max_length
            idx = np.random.randint(0, overflow + 1)
            waveform = waveform[idx : idx + max_length]
            input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        elif truncation == "fusion":
            mel = self._np_extract_fbank_features(waveform, self.mel_filters)
            chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
            total_frames = mel.shape[0]
            if chunk_frames == total_frames:
                # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                # In this case, we just use the whole audio.
                input_mel = np.stack([mel, mel, mel, mel], axis=0)
                longer = False
            else:
                input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                longer = True
        else:
            raise NotImplementedError(f"data_truncating {truncation} not implemented")
    else:
        longer = False
        # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
        if waveform.shape[0] < max_length:
            if padding == "repeat":
                n_repeat = int(max_length / len(waveform))
                waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
            if padding == "repeatpad":
                n_repeat = int(max_length / len(waveform))
                waveform = np.stack(np.tile(waveform, n_repeat))
            waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
        if truncation == "fusion":
            input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
            input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
        else:
            input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
    return input_mel, longer
def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature:
    '''simple docstring'''
    truncation = truncation if truncation is not None else self.truncation
    padding = padding if padding else self.padding
    if sampling_rate is not None:
        if sampling_rate != self.sampling_rate:
            raise ValueError(
                f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
    else:
        logger.warning(
            'It is strongly recommended to pass the `sampling_rate` argument to this function. '
            'Failing to do so can result in silent errors that might be hard to debug.')
    is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
    if is_batched_numpy and len(raw_speech.shape) > 2:
        raise ValueError(f"Only mono-channel audio is supported for input to {self}")
    is_batched = is_batched_numpy or (
        isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
    )
    if is_batched:
        raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
    elif not is_batched and not isinstance(raw_speech, np.ndarray):
        raw_speech = np.asarray(raw_speech, dtype=np.float64)
    elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
        raw_speech = raw_speech.astype(np.float64)
    # always return batch
    if not is_batched:
        raw_speech = [np.asarray(raw_speech)]
    # convert to mel spectrogram, truncate and pad if needed.
    padded_inputs = [
        self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
        for waveform in raw_speech
    ]
    input_mel = []
    is_longer = []
    for mel, longer in padded_inputs:
        input_mel.append(mel)
        is_longer.append(longer)
    if truncation == "fusion" and sum(is_longer) == 0:
        # if no audio is longer than 10s, then randomly select one audio to be longer
        rand_idx = np.random.randint(0, len(input_mel))
        is_longer[rand_idx] = True
    if isinstance(input_mel[0], List):
        input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
    # is_longer is a list of bool
    is_longer = [[longer] for longer in is_longer]
    input_features = {'input_features': input_mel, 'is_longer': is_longer}
    input_features = BatchFeature(input_features)
    if return_tensors is not None:
        input_features = input_features.convert_to_tensors(return_tensors)
    return input_features
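# Hedged usage sketch (the variable names below are illustrative): with the
# defaults above (48 kHz, max_length_s=10, truncation='fusion'), each waveform
# becomes a 4-channel stack of 64-bin log-mel spectrograms plus an `is_longer`
# flag that records whether random-crop fusion was actually needed.
#
#   import numpy as np
#   extractor = ClapFeatureExtractor()
#   wavs = [np.zeros(48_000, dtype=np.float32), np.zeros(960_000, dtype=np.float32)]
#   batch = extractor(wavs, sampling_rate=48_000, return_tensors='pt')
#   print(batch['input_features'].shape, batch['is_longer'])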
| 671
| 0
|
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    '''simple docstring'''
@slow
def test_for_image_classification(self):
    image_processor = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
    model = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
    model.to(torch_device)
    from datasets import load_dataset
    dataset = load_dataset('nielsr/rvlcdip-demo')
    image = dataset['train'][0]['image'].convert('RGB')
    inputs = image_processor(image, return_tensors='pt').to(torch_device)
    # forward pass
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    expected_shape = torch.Size((1, 16))
    self.assertEqual(logits.shape, expected_shape)
    expected_slice = torch.tensor(
        [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float, )
    self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 152
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_bigbird_pegasus"] = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
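# How the lazy-module pattern above behaves, as a hedged sketch: nothing under
# modeling_bigbird_pegasus (and therefore no torch) is imported until an
# attribute is first accessed, e.g.
#
#   from transformers import BigBirdPegasusForConditionalGeneration  # triggers the real import lazily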
| 152
| 1
|
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->', end_prompt='<!--End of the generated tip-->', )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
                ' to fix this.')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 705
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'return_dict',
            'callback',
            'callback_steps',
        ] )
def get_dummy_components(self):
    torch.manual_seed(0)
    unet = UNet3DConditionModel(
        block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'), up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'), cross_attention_dim=32, attention_head_dim=4, )
    scheduler = DDIMScheduler(
        beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
    torch.manual_seed(0)
    vae = AutoencoderKL(
        block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
    torch.manual_seed(0)
    text_encoder_config = CLIPTextConfig(
        bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
    text_encoder = CLIPTextModel(text_encoder_config)
    tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
    components = {
        'unet': unet,
        'scheduler': scheduler,
        'vae': vae,
        'text_encoder': text_encoder,
        'tokenizer': tokenizer,
    }
    return components
def get_dummy_inputs(self, device, seed=0):
    if str(device).startswith('mps'):
        generator = torch.manual_seed(seed)
    else:
        generator = torch.Generator(device=device).manual_seed(seed)
    inputs = {
        'prompt': 'A painting of a squirrel eating a burger',
        'generator': generator,
        'num_inference_steps': 2,
        'guidance_scale': 6.0,
        'output_type': 'pt',
    }
    return inputs
def test_text_to_video_default_case(self):
    device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
    components = self.get_dummy_components()
    sd_pipe = TextToVideoSDPipeline(**components)
    sd_pipe = sd_pipe.to(device)
    sd_pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inputs(device)
    inputs['output_type'] = 'np'
    frames = sd_pipe(**inputs).frames
    image_slice = frames[0][-3:, -3:, -1]
    assert frames[0].shape == (64, 64, 3)
    expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_attention_slicing_forward_pass(self):
    self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

@unittest.skipIf(
    torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
def test_xformers_attention_forwardGenerator_pass(self):
    self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __A ( self ):
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __A ( self ):
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def __A ( self ):
pass
def __A ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
'''simple docstring'''
def test_full_model(self):
    expected_video = load_numpy(
        'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy')
    pipe = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b')
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    pipe = pipe.to('cuda')
    prompt = 'Spiderman is surfing'
    generator = torch.Generator(device='cpu').manual_seed(0)
    video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type='pt').frames
    video = video_frames.cpu().numpy()
    assert np.abs(expected_video - video).mean() < 5e-2
def test_two_step_model(self):
    expected_video = load_numpy(
        'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy')
    pipe = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b')
    pipe = pipe.to('cuda')
    prompt = 'Spiderman is surfing'
    generator = torch.Generator(device='cpu').manual_seed(0)
    video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type='pt').frames
    video = video_frames.cpu().numpy()
    assert np.abs(expected_video - video).mean() < 5e-2
| 64
| 0
|
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
def __init__(self, config, **kwargs):
    '''simple docstring'''
    requires_backends(self, 'timm')
    super().__init__(config)
    self.config = config
    if config.backbone is None:
        raise ValueError('backbone is not set in the config. Please set it to a timm model name.')
    if config.backbone not in timm.list_models():
        raise ValueError(f'backbone {config.backbone} is not supported by timm.')
    if hasattr(config, 'out_features') and config.out_features is not None:
        raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.')
    pretrained = getattr(config, 'use_pretrained_backbone', None)
    if pretrained is None:
        raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.')
    # We just take the final layer by default. This matches the default for the transformers models.
    out_indices = config.out_indices if getattr(config, 'out_indices', None) is not None else (-1,)
    self._backbone = timm.create_model(
        config.backbone, pretrained=pretrained, features_only=config.features_only, in_chans=config.num_channels, out_indices=out_indices, **kwargs, )
    # These are used to control the output of the model when called. If output_hidden_states is True, then
    # return_layers is modified to include all layers.
    self._return_layers = self._backbone.return_layers
    self._all_layers = {layer['module']: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
    super()._init_backbone(config)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
    '''simple docstring'''
    requires_backends(cls, ['vision', 'timm'])
    from ...models.timm_backbone import TimmBackboneConfig
    config = kwargs.pop('config', TimmBackboneConfig())
    use_timm = kwargs.pop('use_timm_backbone', True)
    if not use_timm:
        raise ValueError('use_timm_backbone must be True for timm backbones')
    num_channels = kwargs.pop('num_channels', config.num_channels)
    features_only = kwargs.pop('features_only', config.features_only)
    use_pretrained_backbone = kwargs.pop('use_pretrained_backbone', config.use_pretrained_backbone)
    out_indices = kwargs.pop('out_indices', config.out_indices)
    config = TimmBackboneConfig(
        backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only, use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices, )
    return super()._from_config(config, **kwargs)
def _init_weights(self, module):
    '''simple docstring'''
    pass
def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
    '''simple docstring'''
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    if output_attentions:
        raise ValueError('Cannot output attentions for timm backbones at the moment')
    if output_hidden_states:
        # We modify the return layers to include all the stages of the backbone
        self._backbone.return_layers = self._all_layers
        hidden_states = self._backbone(pixel_values, **kwargs)
        self._backbone.return_layers = self._return_layers
        feature_maps = tuple(hidden_states[i] for i in self.out_indices)
    else:
        feature_maps = self._backbone(pixel_values, **kwargs)
        hidden_states = None
    feature_maps = tuple(feature_maps)
    hidden_states = tuple(hidden_states) if hidden_states is not None else None
    if not return_dict:
        output = (feature_maps,)
        if output_hidden_states:
            output = output + (hidden_states,)
        return output
    return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
| 498
|
"""simple docstring"""
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[row_length - tile_start - tile_length]
    return ways_number[length]
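# The recurrence counts tilings of a 1 x length row with unit squares plus
# tiles of lengths 2, 3 and 4: every tiling either places no tile, or ends a
# tile of some length at some start offset within the row. As a sanity check
# (the value quoted in the Project Euler 117 problem statement):
#
#   assert solution(5) == 15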
if __name__ == "__main__":
print(f'{solution() = }')
| 498
| 1
|
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    ar = f'{sampling_rate}'
    ac = '1'
    format_for_conversion = 'f32le'
    ffmpeg_command = [
        'ffmpeg',
        '-i',
        'pipe:0',
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename') from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile')
    return audio
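# Hedged usage sketch for ffmpeg_read (requires the ffmpeg binary on PATH; the
# file path is illustrative): decode any container/codec ffmpeg understands
# into mono float32 at the requested rate.
#
#   with open('sample.mp3', 'rb') as f:
#       audio = ffmpeg_read(f.read(), sampling_rate=16_000)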
def _lowerCAmelCase ( A__ , A__ , A__ = "f32le" , ):
lowercase__ = F'''{sampling_rate}'''
lowercase__ = '1'
if format_for_conversion == "s16le":
lowercase__ = 2
elif format_for_conversion == "f32le":
lowercase__ = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowercase__ = platform.system()
if system == "Linux":
lowercase__ = 'alsa'
lowercase__ = 'default'
elif system == "Darwin":
lowercase__ = 'avfoundation'
lowercase__ = ':0'
elif system == "Windows":
lowercase__ = 'dshow'
lowercase__ = 'default'
lowercase__ = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
lowercase__ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowercase__ = _ffmpeg_stream(A__ , A__ )
for item in iterator:
yield item
def _lowerCAmelCase ( A__ , A__ , A__ = None , A__ = None , A__ = "f32le" , ):
if stream_chunk_s is not None:
lowercase__ = stream_chunk_s
else:
lowercase__ = chunk_length_s
lowercase__ = ffmpeg_microphone(A__ , A__ , format_for_conversion=A__ )
if format_for_conversion == "s16le":
lowercase__ = np.intaa
lowercase__ = 2
elif format_for_conversion == "f32le":
lowercase__ = np.floataa
lowercase__ = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
lowercase__ = chunk_length_s / 6
lowercase__ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(A__ , (int, float) ):
lowercase__ = [stride_length_s, stride_length_s]
lowercase__ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowercase__ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowercase__ = datetime.datetime.now()
lowercase__ = datetime.timedelta(seconds=A__ )
for item in chunk_bytes_iter(A__ , A__ , stride=(stride_left, stride_right) , stream=A__ ):
# Put everything back in numpy scale
lowercase__ = np.frombuffer(item['raw'] , dtype=A__ )
lowercase__ = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
lowercase__ = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    acc = b''
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}')
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {'raw': acc[:chunk_len], 'stride': stride, 'partial': True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'raw': acc[:chunk_len], 'stride': stride}
                if stream:
                    item['partial'] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {'raw': acc, 'stride': (_stride_left, 0)}
        if stream:
            item['partial'] = False
        yield item
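# Worked illustration of the striding above (values chosen for the example):
# with chunk_len=6 and stride=(1, 1), a stream of b'abcdefghij' yields
# b'abcdef' with stride (0, 1), then b'efghij' with stride (1, 1), then the
# tail b'ij' with stride (1, 0). The strides tell the consumer which bytes are
# overlapping context shared with the neighbouring chunk rather than new audio.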
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b'':
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to stream audio files from filename') from error
| 708
|
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    '''simple docstring'''

    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float('inf')

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for (pro, xxx) in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for (prito, yyy) in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
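# Hedged usage example for the PriorityQueue above (an indexed min-heap that
# also supports re-prioritising arbitrary items):
#
#   pq = PriorityQueue()
#   pq.put((0, 0), priority=5)
#   pq.put((1, 1), priority=2)
#   pq.put((0, 0), priority=1)   # update: (0, 0) now outranks (1, 1)
#   assert pq.top_show() == (0, 0)
#   priority, item = pq.get()    # -> (1, (0, 0))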
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_1(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = '*'
    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = '#'
    grid[0][(n - 1)] = '-'
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = '-'
        x = back_pointer[x]
    grid[(n - 1)][0] = '-'
    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=' ')
                print('<-- End position', end=' ')
            else:
                print(grid[i][j], end=' ')
        print()
    print('^')
    print('Start position')
    print()
    print('# is an obstacle')
    print('- is the path taken by algorithm')
    print('PATH TAKEN BY THE ALGORITHM IS:-')
    x = back_pointer[goal]
    while x != start:
        print(x, end=' ')
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, ):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)
    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float('inf')
            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function))
def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))
    for x in range(15, 20):
        some_list.append((x, 17))
    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))
    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float('inf')}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()
    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))
    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float('inf'):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float('inf'):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float('inf'):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, )
                        close_list_anchor.append(get_s)
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(n):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 642
| 0
|
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__A : str = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
__A : List[Any] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
__A : Tuple = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    """simple docstring"""
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets='auto', pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name='gpt2-large', device_id=-1, max_text_length=1024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25, ):
    out = compute_mauve(
        p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed, )
    return out
| 275
|
'''simple docstring'''
from collections.abc import Callable
class Heap:
    """simple docstring"""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)
def _parent(self, i: int) -> int | None:
    return int((i - 1) / 2) if i > 0 else None

def _left(self, i: int) -> int | None:
    left = int(2 * i + 1)
    return left if 0 < left < self.size else None

def _right(self, i: int) -> int | None:
    right = int(2 * i + 2)
    return right if 0 < right < self.size else None

def _swap(self, i: int, j: int) -> None:
    # First update the indexes of the items in index map.
    self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
        self.pos_map[self.arr[j][0]],
        self.pos_map[self.arr[i][0]],
    )
    # Then swap the items in the list.
    self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

def _cmp(self, i: int, j: int) -> bool:
    return self.arr[i][1] < self.arr[j][1]

def _get_valid_parent(self, i: int) -> int:
    left = self._left(i)
    right = self._right(i)
    valid_parent = i
    if left is not None and not self._cmp(left, valid_parent):
        valid_parent = left
    if right is not None and not self._cmp(right, valid_parent):
        valid_parent = right
    return valid_parent

def _heapify_up(self, index: int) -> None:
    parent = self._parent(index)
    while parent is not None and not self._cmp(index, parent):
        self._swap(index, parent)
        index, parent = parent, self._parent(parent)

def _heapify_down(self, index: int) -> None:
    valid_parent = self._get_valid_parent(index)
    while valid_parent != index:
        self._swap(index, valid_parent)
        index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)
def update_item(self, item: int, item_value: int) -> None:
    if item not in self.pos_map:
        return
    index = self.pos_map[item]
    self.arr[index] = [item, self.key(item_value)]
    # Make sure heap is right in both up and down direction.
    # Ideally only one of them will make any change.
    self._heapify_up(index)
    self._heapify_down(index)

def delete_item(self, item: int) -> None:
    if item not in self.pos_map:
        return
    index = self.pos_map[item]
    del self.pos_map[item]
    self.arr[index] = self.arr[self.size - 1]
    self.pos_map[self.arr[self.size - 1][0]] = index
    self.size -= 1
    # Make sure heap is right in both up and down direction. Ideally only one
    # of them will make any change- so no performance loss in calling both.
    if self.size > index:
        self._heapify_up(index)
        self._heapify_down(index)

def insert_item(self, item: int, item_value: int) -> None:
    arr_len = len(self.arr)
    if arr_len == self.size:
        self.arr.append([item, self.key(item_value)])
    else:
        self.arr[self.size] = [item, self.key(item_value)]
    self.pos_map[item] = self.size
    self.size += 1
    self._heapify_up(self.size - 1)

def get_top(self) -> tuple | None:
    return self.arr[0] if self.size else None

def extract_top(self) -> tuple | None:
    top_item_tuple = self.get_top()
    if top_item_tuple:
        self.delete_item(top_item_tuple[0])
    return top_item_tuple
def test_heap() -> None:
    '''simple docstring'''


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 275
| 1
|
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("time_embed.0.weight", "time_embedding.linear_1.weight"),
("time_embed.0.bias", "time_embedding.linear_1.bias"),
("time_embed.2.weight", "time_embedding.linear_2.weight"),
("time_embed.2.bias", "time_embedding.linear_2.bias"),
("input_blocks.0.0.weight", "conv_in.weight"),
("input_blocks.0.0.bias", "conv_in.bias"),
("out.0.weight", "conv_norm_out.weight"),
("out.0.bias", "conv_norm_out.bias"),
("out.2.weight", "conv_out.weight"),
("out.2.bias", "conv_out.bias"),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("in_layers.0", "norm1"),
("in_layers.2", "conv1"),
("out_layers.0", "norm2"),
("out_layers.3", "conv2"),
("emb_layers.1", "time_emb_proj"),
("skip_connection", "conv_shortcut"),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
hf_down_res_prefix = F'''down_blocks.{i}.resnets.{j}.'''
sd_down_res_prefix = F'''input_blocks.{3*i + j + 1}.0.'''
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
hf_down_atn_prefix = F'''down_blocks.{i}.attentions.{j}.'''
sd_down_atn_prefix = F'''input_blocks.{3*i + j + 1}.1.'''
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
hf_up_res_prefix = F'''up_blocks.{i}.resnets.{j}.'''
sd_up_res_prefix = F'''output_blocks.{3*i + j}.0.'''
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
hf_up_atn_prefix = F'''up_blocks.{i}.attentions.{j}.'''
sd_up_atn_prefix = F'''output_blocks.{3*i + j}.1.'''
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
hf_downsample_prefix = F'''down_blocks.{i}.downsamplers.0.conv.'''
sd_downsample_prefix = F'''input_blocks.{3*(i+1)}.0.op.'''
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
hf_upsample_prefix = F'''up_blocks.{i}.upsamplers.0.'''
sd_upsample_prefix = F'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'''
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
lowercase = "mid_block.attentions.0."
lowercase = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = F'''mid_block.resnets.{j}.'''
    sd_mid_res_prefix = F'''middle_block.{2*j}.'''
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    '''simple docstring'''
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
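# The conversion above is pure key renaming: HF Diffusers checkpoint keys are
# mapped onto the original Stable Diffusion layout while the tensors themselves
# are untouched. As a hedged illustration, 'time_embedding.linear_1.weight'
# becomes 'time_embed.0.weight', and 'down_blocks.0.resnets.0.norm1.weight'
# becomes 'input_blocks.1.0.in_layers.0.weight' via the prefix and resnet
# substitution tables defined above.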
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("nin_shortcut", "conv_shortcut"),
("norm_out", "conv_norm_out"),
("mid.attn_1.", "mid_block.attentions.0."),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
hf_down_prefix = F'''encoder.down_blocks.{i}.resnets.{j}.'''
sd_down_prefix = F'''encoder.down.{i}.block.{j}.'''
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
hf_downsample_prefix = F'''down_blocks.{i}.downsamplers.0.'''
sd_downsample_prefix = F'''down.{i}.downsample.'''
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
hf_upsample_prefix = F'''up_blocks.{i}.upsamplers.0.'''
sd_upsample_prefix = F'''up.{3-i}.upsample.'''
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
hf_up_prefix = F'''decoder.up_blocks.{i}.resnets.{j}.'''
sd_up_prefix = F'''decoder.up.{3-i}.block.{j}.'''
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = F'''mid_block.resnets.{i}.'''
    sd_mid_res_prefix = F'''mid.block_{i+1}.'''
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("norm.", "group_norm."),
("q.", "query."),
("k.", "key."),
("v.", "value."),
("proj_out.", "proj_attn."),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
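# Why the reshape: SD's VAE attention stores q/k/v/proj_out as 1x1
# convolutions, while Diffusers stores them as linear layers, so a (512, 512)
# linear weight becomes a (512, 512, 1, 1) conv kernel with identical values.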
def convert_vae_state_dict(vae_state_dict):
    '''simple docstring'''
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f'mid.attn_1.{weight_name}.weight' in k:
                print(f'Reshaping {k} for SD format')
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("resblocks.", "text_model.encoder.layers."),
("ln_1", "layer_norm1"),
("ln_2", "layer_norm2"),
(".c_fc.", ".fc1."),
(".c_proj.", ".fc2."),
(".attn", ".self_attn"),
("ln_final.", "transformer.text_model.final_layer_norm."),
("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowercase = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    '''simple docstring'''
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith('.self_attn.q_proj.weight')
            or k.endswith('.self_attn.k_proj.weight')
            or k.endswith('.self_attn.v_proj.weight')
        ):
            k_pre = k[: -len('.q_proj.weight')]
            k_code = k[-len('q_proj.weight')]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith('.self_attn.q_proj.bias')
            or k.endswith('.self_attn.k_proj.bias')
            or k.endswith('.self_attn.v_proj.bias')
        ):
            k_pre = k[: -len('.q_proj.bias')]
            k_code = k[-len('q_proj.bias')]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + '.in_proj_weight'] = torch.cat(tensors)
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + '.in_proj_bias'] = torch.cat(tensors)
    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    '''simple docstring'''
    return text_enc_dict
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
)
lowercase = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowercase = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
lowercase = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
lowercase = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
lowercase = load_file(unet_path, device='''cpu''')
else:
lowercase = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
lowercase = torch.load(unet_path, map_location='''cpu''')
if osp.exists(vae_path):
lowercase = load_file(vae_path, device='''cpu''')
else:
lowercase = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
lowercase = torch.load(vae_path, map_location='''cpu''')
if osp.exists(text_enc_path):
lowercase = load_file(text_enc_path, device='''cpu''')
else:
lowercase = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
lowercase = torch.load(text_enc_path, map_location='''cpu''')
# Convert the UNet model
lowercase = convert_unet_state_dict(unet_state_dict)
lowercase = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowercase = convert_vae_state_dict(vae_state_dict)
lowercase = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowercase = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowercase = {"transformer." + k: v for k, v in text_enc_dict.items()}
lowercase = convert_text_enc_state_dict_vaa(text_enc_dict)
lowercase = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
else:
lowercase = convert_text_enc_state_dict(text_enc_dict)
lowercase = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowercase = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowercase = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowercase = {"state_dict": state_dict}
torch.save(state_dict, args.checkpoint_path)
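# Example invocation (hypothetical file and script names):
#
#     python convert_diffusers_to_sd.py \
#         --model_path ./my-diffusers-model \
#         --checkpoint_path ./model.safetensors \
#         --half --use_safetensors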
| 703
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24
| 0
|
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Returns the largest eigenvalue of `input_matrix` and a corresponding eigenvector."""
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
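# Usage sketch (illustrative 2x2 matrix):
#
#     A = np.array([[2.0, 1.0], [1.0, 3.0]])
#     x0 = np.array([1.0, 0.0])
#     eigenvalue, eigenvector = power_iteration(A, x0)
#     # eigenvalue -> ~3.618, the dominant eigenvalue (exactly (5 + sqrt(5)) / 2)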
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh is used for symmetric or Hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element-wise of each eigenvector,
        # as eigenvectors are only unique up to a sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 59
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class __A :
'''simple docstring'''
@staticmethod
def UpperCAmelCase ( *_snake_case : Any ,**_snake_case : List[str] ) -> List[str]:
"""simple docstring"""
pass
def hashimage(image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
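# Usage sketch (hypothetical image): hashimage fingerprints the raw pixel bytes,
# which is how the slow tests below compare pipeline outputs deterministically.
#
#     img = Image.new("RGB", (2, 2))
#     digest = hashimage(img)   # 32-character hex MD5 digest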
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def UpperCAmelCase ( self : str ,_snake_case : Union[str, Any] ,_snake_case : Union[str, Any] ,_snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
lowercase__ : List[str] = DepthEstimationPipeline(model=_snake_case ,image_processor=_snake_case )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCAmelCase ( self : str ,_snake_case : Optional[Any] ,_snake_case : Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ : int = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} ,_snake_case )
import datasets
lowercase__ : str = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' ,'''image''' ,split='''test''' )
lowercase__ : Union[str, Any] = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] ,_snake_case ,)
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def UpperCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
pass
@slow
@require_torch
def UpperCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
lowercase__ : int = '''Intel/dpt-large'''
lowercase__ : Tuple = pipeline('''depth-estimation''' ,model=_snake_case )
lowercase__ : Dict = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
lowercase__ : Dict = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) ,29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) ,2.662 )
@require_torch
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 560
| 0
|
"""simple docstring"""
def is_pentagonal(n: int) -> bool:
    # Invert p = m * (3m - 1) / 2: n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is an integer.
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 50_00) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
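# Sanity-check sketch: the first pentagonal numbers are 1, 5, 12, 22, 35, ...
#
#     assert all(is_pentagonal(p) for p in (1, 5, 12, 22, 35))
#     assert not is_pentagonal(4)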
if __name__ == "__main__":
print(F'''{solution() = }''')
| 122
|
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    """Checks whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extracts a model from its distributed containers (DDP, DataParallel, DeepSpeed, torch.compile)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model, "_converted_to_transformer_engine", False):
        convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model


def wait_for_everyone():
    """Introduces a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Saves an object, writing only once per node (and via xm.save on TPU)."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily sets environment variables (with upper-cased keys), removing them on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Returns a readable name for an object, falling back to its class."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merges `source` into `destination` and returns `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination


def is_port_in_use(port: int = None) -> bool:
    """Checks whether a local port (default 29500) already has a listener."""
    if port is None:
        port = 2_95_00
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
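# Usage sketches (illustrative values):
#
#     merge_dicts({"a": {"x": 1}}, {"a": {"y": 2}})   # -> {"a": {"y": 2, "x": 1}}
#
#     with patch_environment(master_port="29501"):
#         assert os.environ["MASTER_PORT"] == "29501"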
| 122
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
UpperCAmelCase = logging.get_logger(__name__)
class __snake_case( lowercase__ ):
'''simple docstring'''
def __init__( self , *A_ , **A_ ) -> Tuple:
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , __a , )
super().__init__(*__a , **__a )
| 433
|
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
a_ : Dict = logging.get_logger(__name__)
a_ : Dict = Dict[str, Any]
a_ : str = List[Prediction]
@add_end_docstrings(lowercase__ )
class __lowercase( lowercase__ ):
'''simple docstring'''
def __init__( self , *__a , **__a ):
super().__init__(*__a , **__a )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , 'vision' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def snake_case_ ( self , **__a ):
__lowerCamelCase : List[str] = {}
if "threshold" in kwargs:
__lowerCamelCase : Optional[int] = kwargs['threshold']
return {}, {}, postprocess_kwargs
def __call__( self , *__a , **__a ):
return super().__call__(*__a , **__a )
def snake_case_ ( self , __a ):
__lowerCamelCase : Optional[Any] = load_image(__a )
__lowerCamelCase : Any = torch.IntTensor([[image.height, image.width]] )
__lowerCamelCase : Any = self.image_processor(images=[image] , return_tensors='pt' )
if self.tokenizer is not None:
__lowerCamelCase : List[str] = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' )
__lowerCamelCase : Dict = target_size
return inputs
def snake_case_ ( self , __a ):
__lowerCamelCase : Union[str, Any] = model_inputs.pop('target_size' )
__lowerCamelCase : Optional[Any] = self.model(**__a )
__lowerCamelCase : Any = outputs.__class__({'target_size': target_size, **outputs} )
if self.tokenizer is not None:
__lowerCamelCase : Optional[Any] = model_inputs['bbox']
return model_outputs
def snake_case_ ( self , __a , __a=0.9 ):
__lowerCamelCase : Dict = model_outputs['target_size']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
__lowerCamelCase , __lowerCamelCase : Dict = target_size[0].tolist()
def unnormalize(__a ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
__lowerCamelCase , __lowerCamelCase : Tuple = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
__lowerCamelCase : List[str] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
__lowerCamelCase : Union[str, Any] = [unnormalize(__a ) for bbox in model_outputs['bbox'].squeeze(0 )]
__lowerCamelCase : List[str] = ['score', 'label', 'box']
__lowerCamelCase : Tuple = [dict(zip(__a , __a ) ) for vals in zip(scores.tolist() , __a , __a ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
__lowerCamelCase : Optional[int] = self.image_processor.post_process_object_detection(__a , __a , __a )
__lowerCamelCase : Any = raw_annotations[0]
__lowerCamelCase : Any = raw_annotation['scores']
__lowerCamelCase : Tuple = raw_annotation['labels']
__lowerCamelCase : Union[str, Any] = raw_annotation['boxes']
__lowerCamelCase : List[str] = scores.tolist()
__lowerCamelCase : str = [self.model.config.idalabel[label.item()] for label in labels]
__lowerCamelCase : List[Any] = [self._get_bounding_box(__a ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
__lowerCamelCase : int = ['score', 'label', 'box']
__lowerCamelCase : int = [
dict(zip(__a , __a ) )
for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] )
]
return annotation
def snake_case_ ( self , __a ):
if self.framework != "pt":
raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = box.int().tolist()
__lowerCamelCase : Dict = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
| 594
| 0
|
'''simple docstring'''
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implements the hyperbolic tangent (tanh) activation function."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
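# Examples:
#
#     tangent_hyperbolic(np.array([0.0]))   # -> array([0.])
#     tangent_hyperbolic(np.array([1.0]))   # -> array([0.76159416])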
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
A_ = None
A_ = logging.get_logger(__name__)
A_ = "▁"
A_ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
A_ = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
A_ = {
"google/pegasus-xsum": 5_12,
}
class UpperCamelCase__ ( a ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = PegasusTokenizer
_snake_case = ['''input_ids''', '''attention_mask''']
def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="<mask_2>" , SCREAMING_SNAKE_CASE="<mask_1>" , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1_03 , **SCREAMING_SNAKE_CASE , ) -> List[str]:
__lowerCAmelCase : List[str] = offset
if additional_special_tokens is not None:
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise TypeError(
F"""additional_special_tokens should be of type {type(SCREAMING_SNAKE_CASE )}, but is"""
F""" {type(SCREAMING_SNAKE_CASE )}""" )
__lowerCAmelCase : Dict = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"""<unk_{i}>""" for i in range(len(SCREAMING_SNAKE_CASE ) , self.offset - 1 )
]
if len(set(SCREAMING_SNAKE_CASE ) ) != len(SCREAMING_SNAKE_CASE ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
__lowerCAmelCase : Tuple = additional_special_tokens_extended
else:
__lowerCAmelCase : List[str] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 , self.offset )]
super().__init__(
SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , mask_token_sent=SCREAMING_SNAKE_CASE , offset=SCREAMING_SNAKE_CASE , additional_special_tokens=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = vocab_file
__lowerCAmelCase : Union[str, Any] = False if not self.vocab_file else True
def snake_case ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]:
__lowerCAmelCase : List[Any] = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
F""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
return [1 if x in all_special_ids else 0 for x in seq]
def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(SCREAMING_SNAKE_CASE )
elif token_ids_a is None:
return self._special_token_mask(SCREAMING_SNAKE_CASE ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCAmelCase : Optional[Any] = os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
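# Behavior sketch for the methods above (hedged; upstream these are named
# build_inputs_with_special_tokens and get_special_tokens_mask): sequences are never
# prefixed, only terminated with </s>, so a single segment is extended in place:
#
#     tokenizer.build_inputs_with_special_tokens([5, 6])
#     # -> [5, 6, tokenizer.eos_token_id]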
| 123
| 0
|
'''simple docstring'''
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 502
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : List[Any] = ["image_processor", "tokenizer"]
a : Optional[int] = "ChineseCLIPImageProcessor"
a : Dict = ("BertTokenizer", "BertTokenizerFast")
def __init__(self ,_lowerCamelCase=None ,_lowerCamelCase=None ,**_lowerCamelCase ) -> str:
'''simple docstring'''
__lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' ,_lowerCamelCase ,)
__lowercase = kwargs.pop('''feature_extractor''' )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_lowerCamelCase ,_lowerCamelCase )
__lowercase = self.image_processor
def __call__(self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=None ,**_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__lowercase = self.tokenizer(_lowerCamelCase ,return_tensors=_lowerCamelCase ,**_lowerCamelCase )
if images is not None:
__lowercase = self.image_processor(_lowerCamelCase ,return_tensors=_lowerCamelCase ,**_lowerCamelCase )
if text is not None and images is not None:
__lowercase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCamelCase ) ,tensor_type=_lowerCamelCase )
def _UpperCAmelCase (self ,*_lowerCamelCase ,**_lowerCamelCase ) -> str:
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,*_lowerCamelCase ,**_lowerCamelCase ) -> Dict:
'''simple docstring'''
return self.tokenizer.decode(*_lowerCamelCase ,**_lowerCamelCase )
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.tokenizer.model_input_names
__lowercase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,_lowerCamelCase ,)
return self.image_processor_class
| 502
| 1
|
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class snake_case ( UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = PriorTransformer
_lowerCamelCase = "hidden_states"
@property
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = 4
lowerCamelCase_ = 8
lowerCamelCase_ = 7
lowerCamelCase_ = floats_tensor((batch_size, embedding_dim) ).to(lowerCamelCase__ )
lowerCamelCase_ = floats_tensor((batch_size, embedding_dim) ).to(lowerCamelCase__ )
lowerCamelCase_ = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCamelCase__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case ( self , UpperCamelCase=0 ):
"""simple docstring"""
torch.manual_seed(lowerCamelCase__ )
lowerCamelCase_ = 4
lowerCamelCase_ = 8
lowerCamelCase_ = 7
lowerCamelCase_ = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase__ )
lowerCamelCase_ = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase__ )
lowerCamelCase_ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCamelCase__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def snake_case ( self ):
"""simple docstring"""
return (4, 8)
@property
def snake_case ( self ):
"""simple docstring"""
return (4, 8)
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
lowerCamelCase_ = self.dummy_input
return init_dict, inputs_dict
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = PriorTransformer.from_pretrained(
"hf-internal-testing/prior-dummy" , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(lowerCamelCase__ )
lowerCamelCase_ = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.prepare_init_args_and_inputs_for_common()
lowerCamelCase_ = self.model_class(**lowerCamelCase__ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2] , lowerCamelCase__ )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
lowerCamelCase_ = model.to(lowerCamelCase__ )
if hasattr(lowerCamelCase__ , "set_default_attn_processor" ):
model.set_default_attn_processor()
lowerCamelCase_ = self.get_dummy_seed_input()
with torch.no_grad():
lowerCamelCase_ = model(**lowerCamelCase__ )[0]
lowerCamelCase_ = output[0, :5].flatten().cpu()
print(lowerCamelCase__ )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
lowerCamelCase_ = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
self.assertTrue(torch_all_close(lowerCamelCase__ , lowerCamelCase__ , rtol=1e-2 ) )
@slow
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self , UpperCamelCase=1 , UpperCamelCase=768 , UpperCamelCase=77 , UpperCamelCase=0 ):
"""simple docstring"""
torch.manual_seed(lowerCamelCase__ )
lowerCamelCase_ = batch_size
lowerCamelCase_ = embedding_dim
lowerCamelCase_ = num_embeddings
lowerCamelCase_ = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase__ )
lowerCamelCase_ = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase__ )
lowerCamelCase_ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCamelCase__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior" , subfolder="prior" )
model.to(lowerCamelCase__ )
lowerCamelCase_ = self.get_dummy_seed_input(seed=lowerCamelCase__ )
with torch.no_grad():
lowerCamelCase_ = model(**lowerCamelCase__ )[0]
assert list(sample.shape ) == [1, 768]
lowerCamelCase_ = sample[0, :8].flatten().cpu()
print(lowerCamelCase__ )
lowerCamelCase_ = torch.tensor(lowerCamelCase__ )
assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
| 718
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ : Dict = logging.get_logger(__name__)
a_ : List[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
a_ : Optional[Any] = {
"""tokenizer_file""": {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json""",
},
}
a_ : str = {
"""gpt-neox-20b""": 2048,
}
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase="<|endoftext|>" , UpperCamelCase="<|endoftext|>" , UpperCamelCase="<|endoftext|>" , UpperCamelCase=False , **UpperCamelCase , ):
"""simple docstring"""
super().__init__(
UpperCamelCase , UpperCamelCase , tokenizer_file=UpperCamelCase , unk_token=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , add_prefix_space=UpperCamelCase , **UpperCamelCase , )
lowerCamelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCamelCase ) != add_prefix_space:
lowerCamelCase_ = getattr(UpperCamelCase , pre_tok_state.pop("type" ) )
lowerCamelCase_ = add_prefix_space
lowerCamelCase_ = pre_tok_class(**UpperCamelCase )
lowerCamelCase_ = add_prefix_space
def snake_case ( self , UpperCamelCase , UpperCamelCase = None ):
"""simple docstring"""
lowerCamelCase_ = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) + [self.eos_token_id] )
if len(UpperCamelCase ) > self.model_max_length:
lowerCamelCase_ = input_ids[-self.model_max_length :]
return input_ids
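# Behavior sketch of the conversation helper above: every turn is encoded and
# terminated with eos_token_id, and only the last model_max_length ids are kept,
# so long chats are truncated from the left (oldest turns dropped first).
#
#     conv = Conversation("Hello!")   # hypothetical single-turn conversation
#     ids = tokenizer._build_conversation_input_ids(conv)   # upstream name; ends with eos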
| 445
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class lowerCAmelCase_ ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_ ) -> Tuple:
super().__init__(features=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = torch_tensor_kwargs
import torch # noqa import torch at initialization
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Dict:
import torch
if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) and column:
if all(
isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(SCREAMING_SNAKE_CASE_ )
return column
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Any:
import torch
if isinstance(SCREAMING_SNAKE_CASE_, (str, bytes, type(SCREAMING_SNAKE_CASE_ )) ):
return value
elif isinstance(SCREAMING_SNAKE_CASE_, (np.character, np.ndarray) ) and np.issubdtype(value.dtype, np.character ):
return value.tolist()
UpperCamelCase : str = {}
if isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.integer ):
UpperCamelCase : List[str] = {'dtype': torch.intaa}
elif isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.floating ):
UpperCamelCase : int = {'dtype': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(SCREAMING_SNAKE_CASE_, PIL.Image.Image ):
UpperCamelCase : str = np.asarray(SCREAMING_SNAKE_CASE_ )
return torch.tensor(SCREAMING_SNAKE_CASE_, **{**default_dtype, **self.torch_tensor_kwargs} )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
import torch
# support for torch, tf, jax etc.
if hasattr(SCREAMING_SNAKE_CASE_, '__array__' ) and not isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ):
UpperCamelCase : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] )
elif isinstance(SCREAMING_SNAKE_CASE_, (list, tuple) ):
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] )
return self._tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> int:
return map_nested(self._recursive_tensorize, SCREAMING_SNAKE_CASE_, map_list=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : Dict = self.numpy_arrow_extractor().extract_row(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.python_features_decoder.decode_row(SCREAMING_SNAKE_CASE_ )
return self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> "torch.Tensor":
UpperCamelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.python_features_decoder.decode_column(SCREAMING_SNAKE_CASE_, pa_table.column_names[0] )
UpperCamelCase : Any = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = self._consolidate(SCREAMING_SNAKE_CASE_ )
return column
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : List[Any] = self.numpy_arrow_extractor().extract_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = self.python_features_decoder.decode_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
for column_name in batch:
UpperCamelCase : str = self._consolidate(batch[column_name] )
return batch
| 40
| 1
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = 42
UpperCAmelCase = 42
class lowerCAmelCase_ ( nn.Module ):
UpperCAmelCase = 42
UpperCAmelCase = (16, 32, 96, 256)
UpperCAmelCase = jnp.floataa
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_UpperCamelCase = []
for i in range(len(self.block_out_channels ) - 1 ):
_UpperCamelCase = self.block_out_channels[i]
_UpperCamelCase = self.block_out_channels[i + 1]
_UpperCamelCase = nn.Conv(
_A , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_A )
_UpperCamelCase = nn.Conv(
_A , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_A )
_UpperCamelCase = blocks
_UpperCamelCase = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Tuple , _A : Tuple ):
_UpperCamelCase = self.conv_in(_A )
_UpperCamelCase = nn.silu(_A )
for block in self.blocks:
_UpperCamelCase = block(_A )
_UpperCamelCase = nn.silu(_A )
_UpperCamelCase = self.conv_out(_A )
return embedding
@flax_register_to_config
class lowerCAmelCase_ ( nn.Module, __lowercase, __lowercase ):
UpperCAmelCase = 32
UpperCAmelCase = 4
UpperCAmelCase = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCAmelCase = False
UpperCAmelCase = (320, 640, 1280, 1280)
UpperCAmelCase = 2
UpperCAmelCase = 8
UpperCAmelCase = None
UpperCAmelCase = 1280
UpperCAmelCase = 0.0
UpperCAmelCase = False
UpperCAmelCase = jnp.floataa
UpperCAmelCase = True
UpperCAmelCase = 0
UpperCAmelCase = "rgb"
UpperCAmelCase = (16, 32, 96, 256)
def UpperCamelCase_ ( self : List[Any] , _A : jax.random.KeyArray ):
# init input tensors
_UpperCamelCase = (1, self.in_channels, self.sample_size, self.sample_size)
_UpperCamelCase = jnp.zeros(_A , dtype=jnp.floataa )
_UpperCamelCase = jnp.ones((1,) , dtype=jnp.intaa )
_UpperCamelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
_UpperCamelCase = (1, 3, self.sample_size * 8, self.sample_size * 8)
_UpperCamelCase = jnp.zeros(_A , dtype=jnp.floataa )
_UpperCamelCase , _UpperCamelCase = jax.random.split(_A )
_UpperCamelCase = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_A , _A , _A , _A , _A )["params"]
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.block_out_channels
_UpperCamelCase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_UpperCamelCase = self.num_attention_heads or self.attention_head_dim
# input
_UpperCamelCase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_UpperCamelCase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_UpperCamelCase = FlaxTimestepEmbedding(_A , dtype=self.dtype )
_UpperCamelCase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
_UpperCamelCase = self.only_cross_attention
if isinstance(_A , _A ):
_UpperCamelCase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_A , _A ):
_UpperCamelCase = (num_attention_heads,) * len(self.down_block_types )
# down
_UpperCamelCase = []
_UpperCamelCase = []
_UpperCamelCase = block_out_channels[0]
_UpperCamelCase = nn.Conv(
_A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_A )
for i, down_block_type in enumerate(self.down_block_types ):
_UpperCamelCase = output_channel
_UpperCamelCase = block_out_channels[i]
_UpperCamelCase = i == len(_A ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_UpperCamelCase = FlaxCrossAttnDownBlockaD(
in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
_UpperCamelCase = FlaxDownBlockaD(
in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_A )
for _ in range(self.layers_per_block ):
_UpperCamelCase = nn.Conv(
_A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_A )
if not is_final_block:
_UpperCamelCase = nn.Conv(
_A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_A )
_UpperCamelCase = down_blocks
_UpperCamelCase = controlnet_down_blocks
# mid
_UpperCamelCase = block_out_channels[-1]
_UpperCamelCase = FlaxUNetMidBlockaDCrossAttn(
in_channels=_A , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
_UpperCamelCase = nn.Conv(
_A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : str , _A : Dict , _A : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : float = 1.0 , _A : bool = True , _A : bool = False , ):
_UpperCamelCase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
_UpperCamelCase = jnp.flip(_A , axis=1 )
# 1. time
if not isinstance(_A , jnp.ndarray ):
_UpperCamelCase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_A , jnp.ndarray ) and len(timesteps.shape ) == 0:
_UpperCamelCase = timesteps.astype(dtype=jnp.floataa )
_UpperCamelCase = jnp.expand_dims(_A , 0 )
_UpperCamelCase = self.time_proj(_A )
_UpperCamelCase = self.time_embedding(_A )
# 2. pre-process
_UpperCamelCase = jnp.transpose(_A , (0, 2, 3, 1) )
_UpperCamelCase = self.conv_in(_A )
_UpperCamelCase = jnp.transpose(_A , (0, 2, 3, 1) )
_UpperCamelCase = self.controlnet_cond_embedding(_A )
sample += controlnet_cond
# 3. down
_UpperCamelCase = (sample,)
for down_block in self.down_blocks:
if isinstance(_A , _A ):
_UpperCamelCase , _UpperCamelCase = down_block(_A , _A , _A , deterministic=not train )
else:
_UpperCamelCase , _UpperCamelCase = down_block(_A , _A , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
_UpperCamelCase = self.mid_block(_A , _A , _A , deterministic=not train )
# 5. contronet blocks
_UpperCamelCase = ()
for down_block_res_sample, controlnet_block in zip(_A , self.controlnet_down_blocks ):
_UpperCamelCase = controlnet_block(_A )
controlnet_down_block_res_samples += (down_block_res_sample,)
_UpperCamelCase = controlnet_down_block_res_samples
_UpperCamelCase = self.controlnet_mid_block(_A )
# 6. scaling
_UpperCamelCase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=_A , mid_block_res_sample=_A )
| 712
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
_lowerCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
for attribute in key.split('''.''' ):
_UpperCamelCase = getattr(__snake_case , __snake_case )
if weight_type is not None:
_UpperCamelCase = getattr(__snake_case , __snake_case ).shape
else:
_UpperCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_UpperCamelCase = value
elif weight_type == "weight_g":
_UpperCamelCase = value
elif weight_type == "weight_v":
_UpperCamelCase = value
elif weight_type == "bias":
_UpperCamelCase = value
else:
_UpperCamelCase = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase = []
_UpperCamelCase = fairseq_model.state_dict()
_UpperCamelCase = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
_UpperCamelCase = None
for name, value in fairseq_dict.items():
_UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == '''group''' , )
_UpperCamelCase = True
elif name.split('''.''' )[0] == "proj":
_UpperCamelCase = fairseq_model.proj
_UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_UpperCamelCase = True
if "*" in mapped_key:
_UpperCamelCase = name.split(__snake_case )[0].split('''.''' )[-2]
_UpperCamelCase = mapped_key.replace('''*''' , __snake_case )
if "weight_g" in name:
_UpperCamelCase = '''weight_g'''
elif "weight_v" in name:
_UpperCamelCase = '''weight_v'''
elif "bias" in name:
_UpperCamelCase = '''bias'''
elif "weight" in name:
_UpperCamelCase = '''weight'''
else:
_UpperCamelCase = None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(f"""Unused weights: {unused_weights}""" )
return proj_weight
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = full_name.split('''conv_layers.''' )[-1]
_UpperCamelCase = name.split('''.''' )
_UpperCamelCase = int(items[0] )
_UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_UpperCamelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_UpperCamelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_UpperCamelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_UpperCamelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__snake_case )
def _snake_case ( __snake_case ):
_UpperCamelCase , _UpperCamelCase = emb.weight.shape
_UpperCamelCase = nn.Linear(__snake_case , __snake_case , bias=__snake_case )
_UpperCamelCase = emb.weight.data
return lin_layer
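# Note: the constructor arguments above only size a placeholder weight; the
# .data assignment then ties the layer to the embedding matrix, so the returned
# head computes logits = hidden_states @ emb.weight.T (the standard weight-tying trick).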
def _snake_case ( __snake_case ):
with open(__snake_case , '''r''' , encoding='''utf-8''' ) as f:
_UpperCamelCase = f.readlines()
_UpperCamelCase = [line.split(''' ''' )[0] for line in lines]
_UpperCamelCase = len(__snake_case )
_UpperCamelCase = {
'''<s>''': 0,
'''<pad>''': 1,
'''</s>''': 2,
'''<unk>''': 3,
}
vocab_dict.update(dict(zip(__snake_case , range(4 , num_words + 4 ) ) ) )
return vocab_dict
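# Illustrative example (hypothetical dict file): given a fairseq dict whose lines
# read "hello 523" and "world 401", the function above returns
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5} —
# the four reserved specials, then corpus words in file order starting at index 4.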
@torch.no_grad()
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ):
_UpperCamelCase = WavaVecaConfig.from_pretrained(__snake_case )
_UpperCamelCase = SpeechaTextaConfig.from_pretrained(
__snake_case , vocab_size=__snake_case , decoder_layers=__snake_case , do_stable_layer_norm=__snake_case )
_UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
_UpperCamelCase = model[0].eval()
# set weights for wav2vec2 encoder
_UpperCamelCase = WavaVecaModel(__snake_case )
_UpperCamelCase = recursively_load_weights_wavaveca(model.encoder , __snake_case )
_UpperCamelCase = SpeechaTextaForCausalLM(__snake_case )
_UpperCamelCase , _UpperCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__snake_case )
# set output linear layer
unexpected_keys.remove('''embed_out''' )
_UpperCamelCase = nn.Parameter(model.decoder.embed_out.detach() )
    # the layer norm is initialized to the identity, so leaving it untouched is fine
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
_UpperCamelCase = SpeechEncoderDecoderModel(encoder=__snake_case , decoder=__snake_case )
_UpperCamelCase = False
# add projection layer
_UpperCamelCase = nn.Parameter(projection_layer.weight )
_UpperCamelCase = nn.Parameter(projection_layer.bias )
_UpperCamelCase = create_vocab_dict(__snake_case )
with open(os.path.join(__snake_case , '''vocab.json''' ) , '''w''' ) as fp:
json.dump(__snake_case , __snake_case )
_UpperCamelCase = SpeechaTextaTokenizer(os.path.join(__snake_case , '''vocab.json''' ) )
tokenizer.save_pretrained(__snake_case )
_UpperCamelCase = hf_wavavec.config.to_dict()
_UpperCamelCase = tokenizer.pad_token_id
_UpperCamelCase = tokenizer.bos_token_id
_UpperCamelCase = tokenizer.eos_token_id
_UpperCamelCase = '''speech_to_text_2'''
_UpperCamelCase = '''wav2vec2'''
_UpperCamelCase = SpeechEncoderDecoderConfig.from_dict(__snake_case )
hf_wavavec.save_pretrained(__snake_case )
feature_extractor.save_pretrained(__snake_case )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
_lowerCAmelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
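# Example invocation (sketch — the script name and all paths below are placeholders):
#
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path /path/to/checkpoint_best.pt \
#       --dict_path /path/to/dict.txt \
#       --pytorch_dump_folder_path ./wav2vec2-2-speech2text2
#
# The encoder/decoder config paths, vocab size and decoder depth fall back to the
# defaults declared above when not overridden.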
| 71
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __magic_name__ :
def __init__( self , A_ , A_=2 , A_=32 , A_=16 , A_=3 , A_=True , A_=True , A_=32 , A_=4 , A_=[0, 1, 2, 3] , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=0.02 , A_=3 , A_=[1, 384, 24, 24] , A_=True , A_=None , ) -> Union[str, Any]:
"""simple docstring"""
_lowercase: Union[str, Any] = parent
_lowercase: List[str] = batch_size
_lowercase: Any = image_size
_lowercase: int = patch_size
_lowercase: str = num_channels
_lowercase: Any = is_training
_lowercase: str = use_labels
_lowercase: Optional[Any] = hidden_size
_lowercase: int = num_hidden_layers
_lowercase: int = backbone_out_indices
_lowercase: Optional[int] = num_attention_heads
_lowercase: int = intermediate_size
_lowercase: Any = hidden_act
_lowercase: List[str] = hidden_dropout_prob
_lowercase: Any = attention_probs_dropout_prob
_lowercase: List[str] = initializer_range
_lowercase: Dict = num_labels
_lowercase: Tuple = backbone_featmap_shape
_lowercase: Any = scope
_lowercase: Optional[Any] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_lowercase: Optional[int] = (image_size // patch_size) ** 2
_lowercase: Optional[Any] = num_patches + 1
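        # e.g. with the defaults above (image_size=32, patch_size=16): (32 // 16) ** 2 = 4
        # patches, so seq_length = 4 + 1 = 5 once the [CLS] token is counted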
def lowercase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowercase: str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase: List[str] = None
if self.use_labels:
_lowercase: int = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_lowercase: Dict = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowercase: str = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=A_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def lowercase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
_lowercase: Any = DPTModel(config=A_ )
model.to(A_ )
model.eval()
_lowercase: Any = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , A_ , A_ , A_ ) -> Optional[int]:
"""simple docstring"""
_lowercase: Union[str, Any] = self.num_labels
_lowercase: Optional[int] = DPTForDepthEstimation(A_ )
model.to(A_ )
model.eval()
_lowercase: str = model(A_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def lowercase_ ( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
_lowercase: Tuple = self.num_labels
_lowercase: List[Any] = DPTForSemanticSegmentation(A_ )
model.to(A_ )
model.eval()
_lowercase: List[Any] = model(A_ , labels=A_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowercase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowercase: Optional[int] = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase: Union[str, Any] = config_and_inputs
_lowercase: List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
UpperCamelCase_ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
UpperCamelCase_ = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
def lowercase_ ( self ) -> Any:
"""simple docstring"""
_lowercase: Optional[int] = DPTModelTester(self )
_lowercase: Dict = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def lowercase_ ( self ) -> Dict:
"""simple docstring"""
pass
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
_lowercase , _lowercase: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase: int = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowercase: str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def lowercase_ ( self ) -> str:
"""simple docstring"""
_lowercase , _lowercase: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase: int = model_class(A_ )
_lowercase: Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase: Optional[Any] = [*signature.parameters.keys()]
_lowercase: Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A_ )
def lowercase_ ( self ) -> Dict:
"""simple docstring"""
_lowercase: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
_lowercase: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*A_ )
def lowercase_ ( self ) -> str:
"""simple docstring"""
_lowercase: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A_ )
def lowercase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_lowercase , _lowercase: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase: Tuple = True
if model_class in get_values(A_ ):
continue
_lowercase: Any = model_class(A_ )
model.to(A_ )
model.train()
_lowercase: Tuple = self._prepare_for_class(A_ , A_ , return_labels=A_ )
_lowercase: Optional[Any] = model(**A_ ).loss
loss.backward()
def lowercase_ ( self ) -> Optional[int]:
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_lowercase , _lowercase: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase: List[Any] = False
_lowercase: Optional[int] = True
if model_class in get_values(A_ ) or not model_class.supports_gradient_checkpointing:
continue
_lowercase: Union[str, Any] = model_class(A_ )
model.to(A_ )
model.gradient_checkpointing_enable()
model.train()
_lowercase: Optional[int] = self._prepare_for_class(A_ , A_ , return_labels=A_ )
_lowercase: Union[str, Any] = model(**A_ ).loss
loss.backward()
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
_lowercase , _lowercase: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase: Optional[Any] = _config_zero_init(A_ )
for model_class in self.all_model_classes:
_lowercase: List[str] = model_class(config=A_ )
# Skip the check for the backbone
_lowercase: str = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_lowercase: int = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase_ ( self ) -> str:
"""simple docstring"""
pass
@slow
def lowercase_ ( self ) -> int:
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_lowercase: Dict = DPTModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def lowercase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowercase , _lowercase: int = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase: Tuple = '''add'''
with self.assertRaises(A_ ):
_lowercase: List[Any] = DPTForDepthEstimation(A_ )
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class __magic_name__ ( unittest.TestCase ):
def lowercase_ ( self ) -> int:
"""simple docstring"""
_lowercase: Optional[Any] = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
_lowercase: Optional[int] = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(A_ )
_lowercase: str = prepare_img()
_lowercase: Union[str, Any] = image_processor(images=A_ , return_tensors='''pt''' ).to(A_ )
# forward pass
with torch.no_grad():
_lowercase: Dict = model(**A_ )
_lowercase: int = outputs.predicted_depth
# verify the predicted depth
_lowercase: str = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , A_ )
_lowercase: Optional[Any] = torch.tensor(
[[[5.64_37, 5.61_46, 5.65_11], [5.43_71, 5.56_49, 5.59_58], [5.52_15, 5.51_84, 5.52_93]]] ).to(A_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , A_ , atol=1E-4 ) )
| 353
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A__ : int = logging.get_logger(__name__)
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=False ):
"""simple docstring"""
_lowercase: Tuple = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
_lowercase: List[str] = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowercase: Optional[int] = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''' )
_lowercase: int = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowercase: List[str] = in_proj_weight[
: config.hidden_size, :
]
_lowercase: List[Any] = in_proj_bias[: config.hidden_size]
_lowercase: Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowercase: List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowercase: Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
_lowercase: Optional[Any] = in_proj_bias[-config.hidden_size :]
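# Note on the slicing above: timm stores the attention projection as one fused
# qkv matrix of shape (3 * hidden_size, hidden_size). The three slices peel off
# query, key and value in that order — e.g. for hidden_size=768, rows 0-767 are
# the query weights, rows 768-1535 the keys, and the last 768 rows the values.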
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
_lowercase: Optional[int] = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(_UpperCamelCase , _UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase: List[str] = dct.pop(_UpperCamelCase )
_lowercase: Dict = val
@torch.no_grad()
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase: List[Any] = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=_UpperCamelCase )
_lowercase: Optional[Any] = False
_lowercase: Optional[Any] = False
_lowercase: Union[str, Any] = False
_lowercase: Union[str, Any] = False
if "vqa" in checkpoint_url:
_lowercase: Union[str, Any] = True
_lowercase: str = 3_129
_lowercase: List[str] = '''huggingface/label-files'''
_lowercase: Tuple = '''vqa2-id2label.json'''
_lowercase: Dict = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
_lowercase: str = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
_lowercase: Tuple = idalabel
_lowercase: Tuple = {v: k for k, v in idalabel.items()}
_lowercase: Any = ViltForQuestionAnswering(_UpperCamelCase )
elif "nlvr" in checkpoint_url:
_lowercase: Any = True
_lowercase: List[str] = 2
_lowercase: Optional[int] = {0: '''False''', 1: '''True'''}
_lowercase: List[Any] = {v: k for k, v in config.idalabel.items()}
_lowercase: Optional[Any] = 3
_lowercase: List[Any] = ViltForImagesAndTextClassification(_UpperCamelCase )
elif "irtr" in checkpoint_url:
_lowercase: List[str] = True
_lowercase: Optional[int] = ViltForImageAndTextRetrieval(_UpperCamelCase )
elif "mlm_itm" in checkpoint_url:
_lowercase: Dict = True
_lowercase: int = ViltForMaskedLM(_UpperCamelCase )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
_lowercase: List[Any] = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location='''cpu''' )['''state_dict''']
_lowercase: Dict = create_rename_keys(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
for src, dest in rename_keys:
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
read_in_q_k_v(_UpperCamelCase , _UpperCamelCase )
if mlm_model or irtr_model:
_lowercase: int = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(_UpperCamelCase , _UpperCamelCase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
_lowercase , _lowercase: Optional[int] = model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(_UpperCamelCase )
# Define processor
_lowercase: int = ViltImageProcessor(size=384 )
_lowercase: List[str] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
_lowercase: List[Any] = ViltProcessor(_UpperCamelCase , _UpperCamelCase )
# Forward pass on example inputs (image + text)
if nlvr_model:
_lowercase: int = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=_UpperCamelCase ).raw )
_lowercase: List[Any] = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=_UpperCamelCase ).raw )
_lowercase: Dict = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
_lowercase: Any = processor(_UpperCamelCase , _UpperCamelCase , return_tensors='''pt''' )
_lowercase: Union[str, Any] = processor(_UpperCamelCase , _UpperCamelCase , return_tensors='''pt''' )
_lowercase: List[str] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
_lowercase: Dict = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=_UpperCamelCase ).raw )
if mlm_model:
_lowercase: int = '''a bunch of [MASK] laying on a [MASK].'''
else:
_lowercase: Optional[int] = '''How many cats are there?'''
_lowercase: Optional[Any] = processor(_UpperCamelCase , _UpperCamelCase , return_tensors='''pt''' )
_lowercase: Optional[Any] = model(**_UpperCamelCase )
# Verify outputs
if mlm_model:
_lowercase: List[str] = torch.Size([1, 11, 30_522] )
_lowercase: Tuple = torch.tensor([-12.5_061, -12.5_123, -12.5_174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , _UpperCamelCase , atol=1e-4 )
# verify masked token prediction equals "cats"
_lowercase: Optional[int] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
_lowercase: Optional[int] = torch.Size([1, 3_129] )
_lowercase: List[str] = torch.tensor([-15.9_495, -18.1_472, -10.3_041] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 )
# verify vqa prediction equals "2"
_lowercase: List[Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
_lowercase: Tuple = torch.Size([1, 2] )
_lowercase: Any = torch.tensor([-2.8_721, 2.1_291] )
        assert torch.allclose(outputs.logits[0, :2] , _UpperCamelCase , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(_UpperCamelCase )
processor.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
A__ : str = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 353
| 1
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowerCamelCase : Any = 1_6
_lowerCamelCase : int = 3_2
def _UpperCAmelCase (UpperCamelCase_ : Accelerator , UpperCamelCase_ : int = 16 , UpperCamelCase_ : str = "bert-base-cased" ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(A__ )
_lowerCAmelCase : List[str] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(UpperCamelCase_ : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
_lowerCAmelCase : List[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowerCAmelCase : Any = datasets.map(
A__ , batched=A__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=A__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowerCAmelCase : Optional[int] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(UpperCamelCase_ : List[str] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(A__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
_lowerCAmelCase : List[str] = DataLoader(
tokenized_datasets["""train"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
_lowerCAmelCase : List[str] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
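# Design note: padding to a fixed max_length on TPU keeps every batch the same
# shape, which avoids repeated XLA graph recompilation; on GPU/CPU, "longest"
# padding keeps batches as small as possible instead.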
def _UpperCAmelCase (UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Dict ):
'''simple docstring'''
model.eval()
_lowerCAmelCase : List[Any] = 0
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCAmelCase : List[str] = model(**A__ )
_lowerCAmelCase : Any = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once than multiple times
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A__ ) - 1:
_lowerCAmelCase : Any = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_lowerCAmelCase : Optional[int] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A__ , references=A__ , )
_lowerCAmelCase : Any = metric.compute()
return eval_metric["accuracy"]
def _UpperCAmelCase (UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase : Dict = config["""lr"""]
_lowerCAmelCase : str = int(config["""num_epochs"""] )
_lowerCAmelCase : int = int(config["""seed"""] )
_lowerCAmelCase : Tuple = int(config["""batch_size"""] )
_lowerCAmelCase : Dict = args.model_name_or_path
set_seed(A__ )
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_dataloaders(A__ , A__ , A__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCAmelCase : Tuple = AutoModelForSequenceClassification.from_pretrained(A__ , return_dict=A__ )
# Instantiate optimizer
_lowerCAmelCase : int = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_lowerCAmelCase : Union[str, Any] = optimizer_cls(params=model.parameters() , lr=A__ )
if accelerator.state.deepspeed_plugin is not None:
_lowerCAmelCase : Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
_lowerCAmelCase : Any = 1
_lowerCAmelCase : List[Any] = (len(A__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_lowerCAmelCase : Any = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=0 , num_training_steps=A__ , )
else:
_lowerCAmelCase : str = DummyScheduler(A__ , total_num_steps=A__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# We need to keep track of how many total steps we have iterated over
_lowerCAmelCase : Optional[Any] = 0
    # We also need to keep track of the starting epoch so files are named properly
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : int = evaluate.load("""glue""" , """mrpc""" )
_lowerCAmelCase : Tuple = num_epochs
if args.partial_train_epoch is not None:
_lowerCAmelCase : int = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
_lowerCAmelCase : Tuple = args.resume_from_checkpoint.split("""epoch_""" )[1]
_lowerCAmelCase : Dict = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
_lowerCAmelCase : Optional[int] = int(A__ ) + 1
_lowerCAmelCase : List[str] = evaluation_loop(A__ , A__ , A__ , A__ )
accelerator.print("""resumed checkpoint performance:""" , A__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , F"state_{starting_epoch-1}.json" ) , """r""" ) as f:
_lowerCAmelCase : List[str] = json.load(A__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
_lowerCAmelCase : Any = {}
for epoch in range(A__ , A__ ):
model.train()
for step, batch in enumerate(A__ ):
_lowerCAmelCase : Tuple = model(**A__ )
_lowerCAmelCase : List[Any] = outputs.loss
_lowerCAmelCase : Any = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
_lowerCAmelCase : int = F"epoch_{epoch}"
_lowerCAmelCase : Tuple = os.path.join(args.output_dir , A__ )
accelerator.save_state(A__ )
_lowerCAmelCase : List[str] = evaluation_loop(A__ , A__ , A__ , A__ )
_lowerCAmelCase : Optional[Any] = accuracy
_lowerCAmelCase : List[str] = lr_scheduler.get_lr()[0]
_lowerCAmelCase : List[Any] = optimizer.param_groups[0]["""lr"""]
_lowerCAmelCase : Any = epoch
_lowerCAmelCase : str = overall_step
accelerator.print(F"epoch {epoch}:" , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F"state_{epoch}.json" ) , """w""" ) as f:
json.dump(A__ , A__ )
def _UpperCAmelCase ():
'''simple docstring'''
    _lowerCAmelCase : Dict = argparse.ArgumentParser(description="""Simple example of a training script with checkpointing and resumption.""" )
parser.add_argument(
"""--model_name_or_path""" , type=A__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=A__ , )
parser.add_argument(
"""--output_dir""" , type=A__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=A__ , default=A__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=A__ , default=A__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=A__ , default=2 , help="""Number of train epochs.""" , )
_lowerCAmelCase : Tuple = parser.parse_args()
_lowerCAmelCase : Optional[int] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
| 714
|
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
_lowerCamelCase : Optional[Any] = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
_lowerCamelCase : Optional[int] = []
_lowerCamelCase : Tuple = []
_lowerCamelCase : List[str] = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
_lowerCamelCase : Optional[Any] = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": F'''🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results''',
"emoji": True,
},
}
]
_lowerCamelCase : str = 0
for log in Path().glob("*.log"):
_lowerCamelCase : str = 0
with open(log, "r") as f:
for line in f:
_lowerCamelCase : Optional[Any] = json.loads(line)
if line.get("nodeid", "") != "":
_lowerCamelCase : str = line["nodeid"]
if line.get("duration", None) is not None:
_lowerCamelCase : str = F'''{line['duration']:.4f}'''
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
_lowerCamelCase : Optional[int] = []
log.unlink()
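# At this point each group_info entry has the shape
#   [log_filename, num_failed_in_that_log, [[nodeid, duration, test_type], ...]]
# which the reporting code below flattens into per-file failure counts.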
_lowerCamelCase : str = ""
_lowerCamelCase : Optional[Any] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
_lowerCamelCase : Any = []
_lowerCamelCase : Dict = {}
for test in failed_tests:
_lowerCamelCase : Union[str, Any] = test[0].split("::")
_lowerCamelCase : str = data[0].split("/")[-1]
if data[0] not in filesafailed:
_lowerCamelCase : List[str] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
_lowerCamelCase : List[Any] = [test[0] for test in failed_table]
_lowerCamelCase : Optional[Any] = list(set(files))
# Count number of instances in failed_tests
_lowerCamelCase : Union[str, Any] = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
_lowerCamelCase : Optional[Any] = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_0_0_0:
_lowerCamelCase : List[str] = "Too many failed tests, please see the full report in the Action results."
_lowerCamelCase : Tuple = len(err) + 1_0
_lowerCamelCase : Union[str, Any] = message[: 3_0_0_0 - offset] + F'''\n...\n```\n{err}'''
print(F'''### {message}''')
else:
_lowerCamelCase : List[str] = "No failed tests! 🤗"
print(F'''## {message}''')
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
_lowerCamelCase : Optional[int] = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
_lowerCamelCase : int = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
_lowerCamelCase : List[Any] = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": F'''https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
payload.append(action_button)
_lowerCamelCase : Union[str, Any] = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": F'''Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}''',
}
],
}
payload.append(date_report)
_lowerCamelCase : int = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
_lowerCamelCase : List[Any] = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
_lowerCamelCase : Dict = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
_lowerCamelCase : Optional[int] = row[0]
else:
_lowerCamelCase : List[str] = ""
_lowerCamelCase : int = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```''',
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
| 196
| 0
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class __lowercase (datasets.BuilderConfig ):
"""simple docstring"""
_UpperCAmelCase = None
class __lowercase (datasets.ArrowBasedBuilder ):
"""simple docstring"""
_UpperCAmelCase = PandasConfig
def UpperCamelCase__ ( self ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
SCREAMING_SNAKE_CASE_ : str = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
SCREAMING_SNAKE_CASE_ : int = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : int = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
SCREAMING_SNAKE_CASE_ : Optional[Any] = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
SCREAMING_SNAKE_CASE_ : Dict = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : str = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
SCREAMING_SNAKE_CASE_ : Optional[int] = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={'files': files} ) )
return splits
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
SCREAMING_SNAKE_CASE_ : List[str] = table_cast(lowerCAmelCase__ , self.config.features.arrow_schema )
return pa_table
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
for i, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
with open(lowerCAmelCase__ , 'rb' ) as f:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pa.Table.from_pandas(pd.read_pickle(lowerCAmelCase__ ) )
yield i, self._cast_table(lowerCAmelCase__ )
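# Minimal usage sketch (the file name below is hypothetical): each data file is
# expected to be a pickled pandas DataFrame, read with pd.read_pickle above.
#
#   import datasets
#   ds = datasets.load_dataset("pandas", data_files="frames.pkl", split="train")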
| 101
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCAmelCase: Optional[Any] = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase: List[str] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase: List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
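# Design note: _LazyModule defers the real submodule imports until one of the
# exported names is first accessed, so importing the package stays cheap and
# torch is only pulled in when a model class is actually requested.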
| 20
| 0
|
from __future__ import annotations
def lowerCamelCase_ ( lowercase__):
return len(set(lowercase__)) == len(lowercase__)
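# e.g. all_unique([1, 2, 3]) -> True, while all_unique([1, 2, 2]) -> False,
# since duplicates shrink the set relative to the original list.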
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702
|
'''simple docstring'''
class lowercase :
'''simple docstring'''
def __init__( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = name
lowerCamelCase__ = val
def __str__( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return f'''{self.__class__.__name__}({self.name}, {self.val})'''
def __lt__( self : List[str] , __lowerCamelCase : List[str] ) -> str:
'''simple docstring'''
return self.val < other.val
class lowercase :
'''simple docstring'''
def __init__( self : int , __lowerCamelCase : Optional[Any] ) -> List[Any]:
'''simple docstring'''
lowerCamelCase__ = {}
lowerCamelCase__ = {}
lowerCamelCase__ = self.build_heap(__lowerCamelCase )
def __getitem__( self : Tuple , __lowerCamelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
return self.get_value(__lowerCamelCase )
def a__ ( self : str , __lowerCamelCase : int ) -> Optional[Any]:
'''simple docstring'''
return (idx - 1) // 2
def a__ ( self : List[str] , __lowerCamelCase : List[Any] ) -> Optional[int]:
'''simple docstring'''
return idx * 2 + 1
def a__ ( self : Any , __lowerCamelCase : str ) -> Tuple:
'''simple docstring'''
return idx * 2 + 2
def a__ ( self : Any , __lowerCamelCase : int ) -> Dict:
'''simple docstring'''
return self.heap_dict[key]
def a__ ( self : str , __lowerCamelCase : List[str] ) -> str:
'''simple docstring'''
lowerCamelCase__ = len(__lowerCamelCase ) - 1
lowerCamelCase__ = self.get_parent_idx(__lowerCamelCase )
for idx, i in enumerate(__lowerCamelCase ):
lowerCamelCase__ = idx
lowerCamelCase__ = i.val
for i in range(__lowerCamelCase , -1 , -1 ):
self.sift_down(__lowerCamelCase , __lowerCamelCase )
return array
def a__ ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] ) -> int:
'''simple docstring'''
while True:
lowerCamelCase__ = self.get_left_child_idx(__lowerCamelCase ) # noqa: E741
lowerCamelCase__ = self.get_right_child_idx(__lowerCamelCase )
lowerCamelCase__ = idx
if l < len(__lowerCamelCase ) and array[l] < array[idx]:
lowerCamelCase__ = l
if r < len(__lowerCamelCase ) and array[r] < array[smallest]:
lowerCamelCase__ = r
if smallest != idx:
lowerCamelCase__ , lowerCamelCase__ = array[smallest], array[idx]
                lowerCamelCase__ , lowerCamelCase__ = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
lowerCamelCase__ = smallest
else:
break
def a__ ( self : str , __lowerCamelCase : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = self.get_parent_idx(__lowerCamelCase )
while p >= 0 and self.heap[p] > self.heap[idx]:
lowerCamelCase__ , lowerCamelCase__ = self.heap[idx], self.heap[p]
lowerCamelCase__ , lowerCamelCase__ = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
lowerCamelCase__ = p
lowerCamelCase__ = self.get_parent_idx(__lowerCamelCase )
def a__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return self.heap[0]
def a__ ( self : Tuple ) -> int:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ = self.heap[-1], self.heap[0]
lowerCamelCase__ , lowerCamelCase__ = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
lowerCamelCase__ = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def a__ ( self : List[str] , __lowerCamelCase : Tuple ) -> List[Any]:
'''simple docstring'''
self.heap.append(__lowerCamelCase )
lowerCamelCase__ = len(self.heap ) - 1
lowerCamelCase__ = node.val
self.sift_up(len(self.heap ) - 1 )
def a__ ( self : Dict ) -> Any:
'''simple docstring'''
return len(self.heap ) == 0
def a__ ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] ) -> Tuple:
'''simple docstring'''
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than the current value"
lowerCamelCase__ = new_value
lowerCamelCase__ = new_value
self.sift_up(self.idx_of_element[node] )
__A : str = Node("""R""", -1)
__A : Dict = Node("""B""", 6)
__A : Tuple = Node("""A""", 3)
__A : str = Node("""X""", 1)
__A : Optional[Any] = Node("""E""", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__A : Tuple = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
print(i)
print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
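# Walk-through: the initial root is Node(R, -1) (the smallest val in the array);
# after decrease_key(b, -17), B sifts up past R, so the second printout starts
# with Node(B, -17).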
if __name__ == "__main__":
import doctest
doctest.testmod()
| 187
| 0
|
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = "The Nymphenburg Palace is a beautiful palace in Munich!"
def _A( UpperCamelCase__ : str , UpperCamelCase__ : str ) -> Optional[Any]:
'''simple docstring'''
__lowercase = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 1024,
'''hidden_size''': 768,
'''max_length''': 512,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 1024,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1e-5,
'''token_type_vocab_size''': 2,
}
__lowercase = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__lowercase = BERTEncoder(
attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=UpperCamelCase__ , output_all_encodings=UpperCamelCase__ , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , UpperCamelCase__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__lowercase = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
__lowercase = os.path.join(get_home_dir() , '''models''' )
__lowercase = _load_vocab(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , cls=UpperCamelCase__ )
__lowercase = nlp.model.BERTModel(
UpperCamelCase__ , len(UpperCamelCase__ ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=UpperCamelCase__ , use_token_type_embed=UpperCamelCase__ , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=UpperCamelCase__ , use_decoder=UpperCamelCase__ , )
original_bort.load_parameters(UpperCamelCase__ , cast_dtype=UpperCamelCase__ , ignore_extra=UpperCamelCase__ )
__lowercase = original_bort._collect_params_with_prefix()
# Build our config 🤗
__lowercase = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
'''vocab_size''': len(UpperCamelCase__ ),
}
__lowercase = BertConfig.from_dict(UpperCamelCase__ )
__lowercase = BertForMaskedLM(UpperCamelCase__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(UpperCamelCase__ : Tuple ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] ):
__lowercase = hf_param.shape
__lowercase = to_torch(params[gluon_param] )
__lowercase = gluon_param.shape
assert (
shape_hf == shape_gluon
        ), F'The gluon parameter {gluon_param} has shape {shape_gluon}, but Transformers expects shape {shape_hf}'
return gluon_param
__lowercase = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
__lowercase = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
__lowercase = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
__lowercase = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__lowercase = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    for i in range(hf_bort_config.num_hidden_layers):
        layer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )
        # self attention output
        self_output = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )
        # intermediate
        intermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )
        # output
        bert_output = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
    # Save space and energy 🎄
    hf_bort_model.half()
    # Compare the outputs of both models
    # (SAMPLE_TEXT is a module-level constant defined with the imports, not shown in this excerpt)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])
    # Get Transformers output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()
    hf_inputs = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**hf_inputs)[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)
    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ The models do NOT output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase__ = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
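# A hedged invocation sketch (script and file names below are illustrative, not confirmed
# by this excerpt; the .params checkpoint comes from the official Bort release):
#
#     python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#         --bort_checkpoint_path ./bort.params \
#         --pytorch_dump_folder_path ./bort-pytorch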
| 332
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    """Configuration for an X-MOD model (XLM-R with language-specific adapters)."""

    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
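# A hedged sanity-check sketch for the config above (language codes are illustrative):
#
#     config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
#     assert config.model_type == "xmod" and "de_DE" in config.languages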
| 332
| 1
|
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Probabilistic Miller-Rabin primality test with `prec` random witnesses."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int; n - 1 = d * (2 ** exp)
        exp += 1
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 71
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
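# The assignments above register submodules lazily: nothing heavy is imported until an
# attribute is first accessed. A hedged sketch of the effect (assuming this file lives at
# transformers/models/jukebox/__init__.py):
#
#     from transformers.models.jukebox import JukeboxConfig   # triggers the real import here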
| 71
| 1
|
"""simple docstring"""
def simplify(current_set: list[list]) -> list[list]:
    """One Gaussian-elimination pass: normalise each row by its leading term,
    cancel the first column, then recurse on the trailing submatrix."""
    # Divide each row by the magnitude of its first term, creating a "unit" first column
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in the form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(current_first_column)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    """Solve a system of n linear equations given as n rows of n + 1 numbers
    (coefficients followed by the constant term)."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of numbers (int or float)")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
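# A hedged worked example (hand-checked): x + y = 3 and x - y = 1 give x = 2, y = 1:
#
#     print(solve_simultaneous([[1, 1, 3], [1, -1, 1]]))   # -> [2.0, 1.0]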
| 552
|
"""simple docstring"""
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` nodes via breadth-first search."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest path distance (edge count) between `start` and `target` nodes."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
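# Hedged extra checks (hand-traced against demo_graph above):
#
#     bfs_shortest_path(demo_graph, "G", "G")            # -> ["G"] (start == goal)
#     bfs_shortest_path_distance(demo_graph, "G", "Z")   # -> -1 (unknown target node)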
| 677
| 0
|
"""simple docstring"""
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 397
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowercase : int = logging.get_logger(__name__)
_lowercase : Any = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_400,
        n_positions=2_048,
        n_ctx=2_048,
        n_embd=4_096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
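# A hedged usage sketch for the ONNX config above (defaults come from CodeGenConfig):
#
#     config = CodeGenConfig()
#     onnx_config = CodeGenOnnxConfig(config)
#     assert list(onnx_config.inputs) == ["input_ids", "attention_mask"]
#     assert onnx_config.num_layers == config.n_layer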
| 397
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 393
|
'''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of nums[left..right] by divide and conquer."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range [left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range [mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
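# Hedged worked example: find_max([3, 7, 1, 9], 0, 3) splits into [3, 7] and [1, 9],
# whose maxima are 7 and 9, so the overall result is 9:
#
#     assert find_max([3, 7, 1, 9], 0, 3) == 9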
| 427
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 567
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 567
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 25
|
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name: str, num_meta4D_last_stage: int) -> str:
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint


# We verify the conversion on a standard COCO image
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
    model.load_state_dict(new_state_dict)
    model.eval()
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    assert torch.allclose(original_pixel_values, pixel_values)
    outputs = model(pixel_values)
    logits = outputs.logits
    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7"
        )
    # Save checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")
    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add model", use_temp_dir=True
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add image processor", use_temp_dir=True
        )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
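# A hedged invocation sketch (file names are illustrative):
#
#     python convert_efficientformer_checkpoint.py \
#         --pytorch_model_path ./efficientformer_l1.pth \
#         --config_file ./efficientformer_l1_config.json \
#         --pytorch_dump_path ./efficientformer-l1 --no-push_to_hub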
| 24
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 224
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 224
| 1
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
REMOTE_MODEL_PATHS = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")
    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with the corresponding layer name in the HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)
    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )
    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
lowerCAmelCase__ = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
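# A hedged invocation sketch (script name and path are illustrative, per the positional
# arguments of the parser above):
#
#     python convert_suno_to_hf.py text ./bark-semantic --is_small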
| 41
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : Tuple = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig(PretrainedConfig):
    """Configuration for a CamemBERT model."""

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 56
| 0
|
ROMAN = [
(1_000, "M"),
(900, "CM"),
(500, "D"),
(400, "CD"),
(100, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral string to an integer."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman(number: int) -> str:
    """Convert an integer to a Roman numeral string."""
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
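# Hand-checked round trip: MCMXCIV = 1000 + 900 + 90 + 4 = 1994.
#
#     assert roman_to_int("MCMXCIV") == 1994
#     assert int_to_roman(1994) == "MCMXCIV"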
| 715
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle strings, lists and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits
    def _cast_table(self, pa_table):
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                field_type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=field_type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
def lowerCAmelCase__ ( self , _A):
for file_idx, file in enumerate(itertools.chain.from_iterable(_A)):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
SCREAMING_SNAKE_CASE_ = json.load(_A)
# We keep only the field we are interested in
SCREAMING_SNAKE_CASE_ = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(_A , (list, tuple)):
SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset])
SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys}
else:
SCREAMING_SNAKE_CASE_ = dataset
SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(_A)
yield file_idx, self._cast_table(_A)
# If the file has one json object per line
else:
with open(_A , 'rb') as f:
SCREAMING_SNAKE_CASE_ = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
SCREAMING_SNAKE_CASE_ = max(self.config.chunksize // 32 , 16 << 10)
SCREAMING_SNAKE_CASE_ = (
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
)
while True:
SCREAMING_SNAKE_CASE_ = f.read(self.config.chunksize)
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_A)
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
SCREAMING_SNAKE_CASE_ = batch.decode(self.config.encoding , errors=_A).encode('utf-8')
try:
while True:
try:
SCREAMING_SNAKE_CASE_ = paj.read_json(
io.BytesIO(_A) , read_options=paj.ReadOptions(block_size=_A))
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_A , pa.ArrowInvalid)
and "straddling" not in str(_A)
or block_size > len(_A)
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f"""Batch of {len(_A)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""")
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
SCREAMING_SNAKE_CASE_ = json.load(_A)
except json.JSONDecodeError:
logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""")
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_A , _A): # list is the only sequence type supported in JSON
try:
SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset])
SCREAMING_SNAKE_CASE_ = {col: [row.get(_A) for row in dataset] for col in keys}
SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(_A)
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""")
raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None
yield file_idx, self._cast_table(_A)
break
else:
logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""")
raise ValueError(
f"""Not able to read records in the JSON file at {file}. """
f"""You should probably indicate the field of the JSON file containing your records. """
f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """
f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_A)
batch_idx += 1
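# The retry loop above doubles `block_size` whenever pyarrow reports a JSON object
# "straddling" a block boundary. A minimal standalone sketch of the same idea
# (hypothetical helper, not part of the builder above; assumes utf-8 JSON-lines bytes):
import io

import pyarrow as pa
import pyarrow.json as paj


def read_json_with_growing_block_size(batch: bytes, initial_block_size: int = 16 << 10) -> pa.Table:
    """Parse JSON-lines bytes, doubling the read block size until every row fits in one block."""
    block_size = initial_block_size
    while True:
        try:
            return paj.read_json(io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
        except pa.ArrowInvalid as e:
            # Only retry when a single JSON object straddles two blocks and the
            # block can still grow to cover the whole batch.
            if "straddling" not in str(e) or block_size > len(batch):
                raise
            block_size *= 2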
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False)

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowerCamelCase : Union[str, Any] = {"""input_ids""": [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = __lowerCamelCase  # alias for the literal assigned in the fmt: off block above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
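# Note on the decode test above: BigBird's sentencepiece model attaches "[MASK]"
# without a preceding space, so round-tripping "Paris is the [MASK]." yields
# "[CLS] Paris is the[MASK].[SEP]". A quick hedged sketch (requires network access
# to download the checkpoint):
#
#   tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   print(tok.decode(tok("Paris is the [MASK].").input_ids))
#   # -> [CLS] Paris is the[MASK].[SEP]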
"""simple docstring"""
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24_000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio,
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs["input_values"] = input_values

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
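# Hedged usage sketch for the extractor above (hypothetical one-second mono clip;
# the 24 kHz EnCodec checkpoints use chunk_length_s/overlap for streaming-style chunking):
#
#   import numpy as np
#   fe = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000, chunk_length_s=1.0, overlap=0.01)
#   audio = np.zeros(24_000, dtype=np.float32)
#   out = fe(audio, sampling_rate=24_000, return_tensors="np")
#   print(out["input_values"].shape)  # (batch, channels, padded_length)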
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google style config to the Hugging Face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of `number`, rounded to `digit_amount` digits when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
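# Worked example: decimal_isolate(35.345, 1) computes 35.345 - 35 = 0.345 and
# rounds to one digit, giving 0.3. With digit_amount == 0 the raw fractional part
# is returned unrounded (e.g. 1.53 -> 0.53, subject to floating-point error).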
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path


git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        fp16: bool = True,
        quality_checks: bool = True,
    ):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
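# For reference, get_launcher(distributed=True) on a 2-GPU machine yields the
# prefix ["deepspeed", "--num_nodes", "1", "--num_gpus", "2"], which is prepended
# to the run_asr.py script path and its CLI args before execute_subprocess_async
# runs the full command.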
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Find the denominator d <= `digit` for which numerator/d has the longest
    recurring cycle in its decimal fraction part (Project Euler problem 26).
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
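# Worked example: for d = 7 the long-division remainders of 1/7 are 1, 3, 2, 6, 4, 5
# before repeating, so 1/7 = 0.(142857) has a 6-digit recurring cycle - the longest
# for d <= 10 - and solution(1, 10) returns 7.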
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices


if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to pretrain.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """
    Data collator that dynamically pads the received inputs and prepares masked
    time indices for self-supervised pretraining.
    """

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None
    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        attention_mask = None
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch
class Wav2Vec2PreTrainer(Trainer):
    """
    Subclassed Trainer for Wav2Vec2-like pretraining. Decays the gumbel softmax
    temperature after every update step.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
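# Worked example of the gumbel temperature schedule used by Wav2Vec2PreTrainer:
# with max_gumbel_temp=2.0, gumbel_temp_decay=0.999995 and min_gumbel_temp=0.5,
# the temperature after `step` updates is max(2.0 * 0.999995**step, 0.5), e.g.
# about 1.81 after 20k steps and clamped at 0.5 from roughly step 277k onward.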
def is_unique_chars(input_str: str) -> bool:
    """Return True if every character in `input_str` occurs at most once, using a bitmap."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
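# Worked example: for "abca", the bits for 'a' (97), 'b' (98) and 'c' (99) are set
# in turn; the second 'a' finds bit 97 already on, so the function returns False.
# "abc" sets three distinct bits and returns True.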
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    """
    Count the square laminae that can be formed using up to `limit` tiles
    (Project Euler problem 173).
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
if __name__ == "__main__":
print(F"{solution() = }")
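# Worked example: with limit = 12 there are two laminae - a 3x3 square with a 1x1
# hole (8 tiles) and a 4x4 square with a 2x2 hole (12 tiles) - so solution(12) == 2.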
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
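# Hedged note on the integration check above: atol=1e-4 absorbs kernel-level
# nondeterminism across devices. A quick sanity sketch of the same comparison:
#
#   torch.allclose(torch.tensor([0.16480]), torch.tensor([0.16485]), atol=1e-4)  # True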
from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: returns (max_value, fraction taken of each item)."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
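# Worked example (classic instance): values [60, 100, 120], weights [10, 20, 30],
# capacity 50. The value/weight ratios are 6, 5, 4, so the first two items are
# taken whole and 2/3 of the third, giving 60 + 100 + 120 * 2/3 = 240:
#
#   fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
#   # -> (240.0, [1, 1, 0.666...])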
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
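# A rough sketch of what `_LazyModule` does for the structure above (simplified,
# hypothetical names - the real implementation lives in transformers.utils):
#
#   import importlib, types
#
#   class LazyModuleSketch(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           # map attribute name -> submodule, e.g. "NllbTokenizer" -> "tokenization_nllb"
#           self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}
#
#       def __getattr__(self, name):
#           module = importlib.import_module("." + self._class_to_module[name], self.__name__)
#           return getattr(module, name)
#
# so `from ...nllb import NllbTokenizer` triggers the sentencepiece-backed import
# only on first attribute access instead of at package import time.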
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : int ,lowerCamelCase : str ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Optional[Any] ,*lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = CTRLForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase ,token_type_ids=lowerCamelCase ,labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class __a ( _snake_case, _snake_case, _snake_case, unittest.TestCase ):
__UpperCamelCase : str = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
__UpperCamelCase : Dict = (CTRLLMHeadModel,) if is_torch_available() else ()
__UpperCamelCase : int = (
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase : List[str] = True
__UpperCamelCase : Dict = False
__UpperCamelCase : Tuple = False
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Any ,lowerCamelCase : str ,lowerCamelCase : List[str] ,lowerCamelCase : int ,lowerCamelCase : Dict ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = CTRLModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase ,n_embd=37 )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCamelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
pass
@slow
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = CTRLModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
pass
@require_torch
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(lowerCamelCase )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[1_1859, 0, 1611, 8]] ,dtype=torch.long ,device=lowerCamelCase ) # Legal the president is
__SCREAMING_SNAKE_CASE = [
1_1859,
0,
1611,
8,
5,
150,
2_6449,
2,
19,
348,
469,
3,
2595,
48,
2_0740,
24_6533,
24_6533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__SCREAMING_SNAKE_CASE = model.generate(lowerCamelCase ,do_sample=lowerCamelCase )
self.assertListEqual(output_ids[0].tolist() ,lowerCamelCase )
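# Hedged sanity-check sketch for the greedy-generation test above (assumes the
# `ctrl` checkpoint and its tokenizer are downloadable; illustrative only):
#
#     from transformers import CTRLTokenizer
#
#     tokenizer = CTRLTokenizer.from_pretrained("ctrl")
#     print(tokenizer.decode(expected_output_ids))
#     # -> "Legal the president is a good guy and I don't want to lose my job. \n \n I have a"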
"""Project Euler problem 39: find the perimeter p <= 1000 that admits the most
integer-sided right triangles."""
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for every perimeter up to ``max_perimeter``, how many right
    triangles with integer sides have that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter with the maximum number of solutions."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
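# Illustrative check of the counting helper above: the only integer right
# triangle with perimeter <= 12 is (3, 4, 5), so:
#
#     >>> pythagorean_triple(12)
#     Counter({12: 1})
#     >>> solution(12)
#     12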
"""Sorts the names inside the `_import_structure` dicts of transformers
__init__.py files, keeping constants first, then classes, then functions."""
import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line: str) -> str:
    """Return the indentation of `line` (empty string if none)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks at the given indentation level, optionally
    bounded by a `start_prompt` and an `end_prompt` line."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the code).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap a key function so that comparisons ignore case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort `objects` so constants come first, classes second, functions last."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement: str) -> str:
    """Sort the names inside a single import statement."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file: str, check_only: bool = True):
    """Sort the `_import_structure` of a given __init__.py file."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every __init__.py under PATH_TO_TRANSFORMERS."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
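# Illustrative behaviour of `sort_objects` above (hypothetical names, shown as
# a doctest-style sketch): constants sort first, then classes, then functions,
# each group alphabetically with case and underscores ignored.
#
#     >>> sort_objects(["load_model", "MODEL_LIST", "AutoModel", "config"])
#     ['MODEL_LIST', 'AutoModel', 'config', 'load_model']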
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
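# Note on `video[:8]` above: the Kinetics-400 TimeSformer base checkpoint was
# trained on 8-frame clips, so only the first 8 frames of the sample video are
# fed to the image processor (an assumption based on the published model card).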
from ..utils import DummyObject, requires_backends


# NOTE: the original class names in this dummy-objects module were stripped
# during extraction. The four pipeline names below are an educated guess based
# on diffusers' dummy_flax_and_transformers_objects module; treat them as
# placeholders rather than a verified listing.
class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
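# How the DummyObject pattern above behaves (illustrative sketch, using the
# placeholder class names introduced above): when flax or transformers is not
# installed, any instantiation or factory call raises a helpful ImportError
# rather than a NameError, e.g.
#
#     pipe = FlaxStableDiffusionPipeline.from_pretrained("some/repo")
#     # ImportError: FlaxStableDiffusionPipeline requires the flax library ...
#
# so user code fails with installation instructions instead of an obscure error.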
def odd_even_sort(input_list: list) -> list:
    """Sort a list in place using odd-even transposition (brick) sort."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True

        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False

    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
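# Illustrative runs of the sorter above (doctest-style; no user input needed):
#
#     >>> odd_even_sort([5, 3, 1, 4, 2])
#     [1, 2, 3, 4, 5]
#     >>> odd_even_sort([])
#     []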
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
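# With the custom checker above, a doctest can opt out of output comparison via
# the registered flag, e.g. (hypothetical function, shown only to illustrate):
#
#     >>> get_some_handle()  # doctest: +IGNORE_RESULT
#     <object at 0x...>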
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inpainting(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def stooge_sort(arr: list) -> list:
    """Sort ``arr`` in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return

    # If first element is smaller than the last then swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)

        # Recursively sort last 2/3 elements
        stooge(arr, i + t, h)

        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
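# Stooge sort runs in O(n^(log 3 / log 1.5)) ~ O(n^2.71) time, so it is purely
# a teaching example. Doctest-style check of the implementation above:
#
#     >>> stooge_sort([18, 2, 9, 7])
#     [2, 7, 9, 18]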
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
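# Minimal usage sketch for the exports above (assumes a standard PyTorch
# training setup; `model`, `optimizer` and `dataloader` are user-provided):
#
#     from accelerate import Accelerator
#
#     accelerator = Accelerator()
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     for batch in dataloader:
#         outputs = model(**batch)
#         accelerator.backward(outputs.loss)
#         optimizer.step()
#         optimizer.zero_grad()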
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
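# How the lazy init above behaves at runtime (illustrative sketch, not part of
# the module): `_LazyModule` replaces this module in `sys.modules`, and each
# attribute listed in `_import_structure` triggers the real submodule import
# only on first access:
#
#     import transformers.models.wavlm as wavlm
#     config = wavlm.WavLMConfig()  # only now is configuration_wavlm imported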
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # Note: this tester deliberately hard-codes its values instead of using
        # the constructor arguments (hidden_size 384 with embedding_size 128,
        # head_ratio 2 and conv_kernel_size 9 match the ConvBERT defaults).
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)


@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
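# Note on the `/ 2` in the attention-shape checks above: ConvBERT's head_ratio
# (2 for this tester) folds half of the self-attention heads into the
# convolutional branch, so the exposed attention tensors only carry
# num_attention_heads / head_ratio heads per layer.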
import os
def solution():
    """Find the greatest product of four adjacent numbers (right, down, or
    diagonally) in the 20x20 grid read from grid.txt (Project Euler problem 11)."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

        maximum = 0

        # right
        for i in range(20):
            for j in range(17):
                temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
                if temp > maximum:
                    maximum = temp

        # down
        for i in range(17):
            for j in range(20):
                temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
                if temp > maximum:
                    maximum = temp

        # diagonal 1
        for i in range(17):
            for j in range(17):
                temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
                if temp > maximum:
                    maximum = temp

        # diagonal 2
        for i in range(17):
            for j in range(3, 20):
                temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
                if temp > maximum:
                    maximum = temp
        return maximum
if __name__ == "__main__":
print(solution())
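# A minimal sketch of the same four-in-a-row scan on a tiny in-memory grid
# (illustrative only; `max_product_right` is a hypothetical helper, not part of
# the original solution):
#
#     grid = [[1, 2, 3, 4], [5, 6, 7, 8]]
#     def max_product_right(g):
#         return max(
#             g[i][j] * g[i][j + 1] * g[i][j + 2] * g[i][j + 3]
#             for i in range(len(g))
#             for j in range(len(g[0]) - 3)
#         )
#     assert max_product_right(grid) == 5 * 6 * 7 * 8  # 1680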
| 333
| 1
|
"""simple docstring"""
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark the three pangram checks against each other."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
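# Quick sanity check that the three implementations agree (uses only the
# function names defined above):
#
#     >>> is_pangram() and is_pangram_faster() and is_pangram_fastest()
#     True
#     >>> any(f("not a pangram") for f in (is_pangram, is_pangram_faster, is_pangram_fastest))
#     False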
| 706
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        # pandas' `read_csv` takes `sep`/`names`; `delimiter`/`column_names` are aliases
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
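# A minimal usage sketch, assuming this module is registered as the packaged
# `csv` builder in `datasets` (the file names below are illustrative):
#
#     from datasets import load_dataset
#     ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=";")
#     print(ds["train"][0])
#
# Keyword arguments such as `sep` populate `CsvConfig` and are forwarded,
# via `pd_read_csv_kwargs`, to `pandas.read_csv`.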
| 147
| 0
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """This __init__ is there for legacy code. When removing deprecated args completely,
        the class can simply be deleted."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )
    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
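# A usage sketch (illustrative; `models`, `batch_sizes` and `sequence_lengths`
# are fields inherited from `BenchmarkArguments`):
#
#     args = TensorFlowBenchmarkArguments(
#         models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
#     )
#     print(args.n_gpu, args.is_tpu)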
| 30
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
    names_to_config = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] ,num_attention_heads=[4, 6, 8] ,depths=[2, 3, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] ,num_attention_heads=[4, 8, 12] ,depths=[4, 4, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] ,num_attention_heads=[3, 5, 6] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] ,num_attention_heads=[4, 6, 8] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] ,num_attention_heads=[6, 9, 12] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0.1 ,),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
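# Typical invocation of this conversion script (the script file name is
# illustrative):
#
#     python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#         --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub
#
# Omitting --model_name converts every LeViT variant listed in `names_to_config`.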
| 294
| 0
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
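# A short usage sketch with the class defined above:
#
#     tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     ids = tokenizer("Hello world").input_ids
#     print(tokenizer.decode(ids))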
| 439
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 439
| 1
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute the log-mel spectrogram of the provided audio."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        """Serializes this instance to a Python dictionary, dropping the (large) mel filter bank."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
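# A minimal sketch of extracting Whisper input features from one second of
# silence (illustrative):
#
#     import numpy as np
#     fe = WhisperFeatureExtractor()
#     features = fe(np.zeros(16_000, dtype=np.float32), sampling_rate=16_000, return_tensors="np")
#     print(features["input_features"].shape)  # (1, 80, 3000): 80 mel bins, 30 s of frames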
| 287
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('''>=''', '''0.0.12''')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        images: np.ndarray
        nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 287
| 1
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serializes this instance to a Python dictionary, including the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
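# A short usage sketch with the classes defined above:
#
#     config = GitConfig()                       # text config plus a default GitVisionConfig
#     print(config.vision_config.hidden_size)    # 768
#     assert config.to_dict()["model_type"] == "git"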
| 712
|
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    """Check to see if a number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F"{solution() = }")
| 477
| 0
|
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask, all persons are assigned a task; return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
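# Complexity note: the memoization visits each (mask, task_no) state at most
# once, so the run time is roughly O(2^M * N * M) for M persons and N tasks;
# for the example above (M=3, N=5) the DP table has 2^3 * 6 entries.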
| 311
|
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding the nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
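# A usage sketch with the class defined above (8-directional connectivity):
#
#     grid = [
#         [1, 1, 0, 0],
#         [0, 1, 0, 0],
#         [0, 0, 0, 1],
#     ]
#     print(Matrix(3, 4, grid).count_islands())  # 2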
| 403
| 0
|
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )

    return int(stack[0])
if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print("\n\tResult = ", solve(Postfix))
| 711
|
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_000_000) -> int:
    """Find the area of the grid whose number of contained rectangles is closest
    to the target (Project Euler problem 85)."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(F"{solution() = }")
| 583
| 0
|
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 102
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
"VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMAEForPreTraining",
"ViTMAELayer",
"ViTMAEModel",
"ViTMAEPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
"TFViTMAEForPreTraining",
"TFViTMAEModel",
"TFViTMAEPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 488
| 0
|
'''simple docstring'''
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    """Return True if the number equals the sum of the factorials of its digits."""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number
if __name__ == "__main__":
print("Program to check whether a number is a Krisnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
print(
f'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.'''
)
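# Example: 145 is a Krishnamurthy (strong) number, since
# 1! + 4! + 5! = 1 + 24 + 120 = 145, so krishnamurthy(145) is True;
# krishnamurthy(10) is False (1! + 0! = 2 != 10).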
| 716
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 644
| 0
|
import unittest

import numpy as np

from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
        FlaxRobertaPreLayerNormForCausalLM,
        FlaxRobertaPreLayerNormForMaskedLM,
        FlaxRobertaPreLayerNormForMultipleChoice,
        FlaxRobertaPreLayerNormForQuestionAnswering,
        FlaxRobertaPreLayerNormForSequenceClassification,
        FlaxRobertaPreLayerNormForTokenClassification,
        FlaxRobertaPreLayerNormModel,
    )


class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)

        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)

        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
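

# A minimal inference sketch for the checkpoint tested above (not part of the test
# suite; assumes the Hub checkpoint and its PyTorch weights are reachable):
if __name__ == "__main__":
    demo_model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
    demo_input_ids = np.array([[0, 31414, 232, 2]], dtype=np.int32)
    demo_output = demo_model(demo_input_ids)[0]
    print(demo_output.shape)  # (1, 4, hidden_size)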
| 17
|
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class Stack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
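

# A short usage sketch for the stack above (not part of the original module):
def _demo_stack() -> None:
    stack: Stack[int] = Stack()
    for value in (1, 2, 3):
        stack.push(value)
    print(stack)  # 3->2->1
    print(stack.pop())  # 3
    print(stack.peek())  # 2
    print(len(stack))  # 2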
if __name__ == "__main__":
from doctest import testmod
testmod()
| 699
| 0
|
import re
import string

import numpy as np

import datasets


_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
"""

_CITATION = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 34
|
import gc
import random
import unittest

import torch

from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin


@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if

        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)

        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory

        pipe_1.text_encoder.to("cuda")

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img

        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting

        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1

        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2

        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1

        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        generator = torch.Generator(device="cpu").manual_seed(0)

        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2

        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1

        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2

        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)


def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
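

# Sketch of how the helper above brackets a workload to read its peak VRAM use
# (the tensor allocation is a hypothetical stand-in for any pipeline call):
if __name__ == "__main__" and torch.cuda.is_available():
    _start_torch_memory_measurement()
    _ = torch.ones((1024, 1024), device="cuda")
    print(torch.cuda.max_memory_allocated(), "bytes peak")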
| 34
| 1
|
g = 9.80665


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """
    Calculate the buoyant force on an object submerged in a fluid:
    F_b = fluid_density * gravity * volume
    """
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")

    return fluid_density * gravity * volume
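

# Worked example (a sketch with standard reference values): a 0.5 m^3 object fully
# submerged in fresh water (1000 kg/m^3) displaces 1000 * 9.80665 * 0.5 ≈ 4903.3 N.
assert abs(archimedes_principle(1000, 0.5) - 4903.325) < 1e-9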
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 378
|
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    """
    Move a tower of `height` disks from `from_pole` to `to_pole`,
    using `with_pole` as the auxiliary pole.
    """
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(from_pole: str, to_pole: str) -> None:
    print("moving disk from", from_pole, "to", to_pole)
def main() -> None:
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
| 378
| 1
|
"""Convert MobileNetV1 checkpoints from the tensorflow/models library."""


import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilenet_v1_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should be in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_mobilenet_v1_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
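
# Example invocation (a sketch; the script name and checkpoint paths are hypothetical):
#
#   python convert_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_pt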
| 58
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 1
|
"""simple docstring"""
from math import isclose, sqrt
def _SCREAMING_SNAKE_CASE ( UpperCamelCase : float , UpperCamelCase : float , UpperCamelCase : float ):
A__ = point_y / 4 / point_x
A__ = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
A__ = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
A__ = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
A__ = outgoing_gradient**2 + 4
A__ = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
A__ = (point_y - outgoing_gradient * point_x) ** 2 - 100
A__ = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
A__ = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
A__ = x_minus if isclose(UpperCamelCase , UpperCamelCase ) else x_plus
A__ = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
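

# Quick sanity check (a sketch, not part of the original solution): reflecting from
# the first impact point (1.4, -9.6) must land on the ellipse 4x^2 + y^2 = 100.
_x, _y, _gradient = next_point(1.4, -9.6, (10.1 - -9.6) / (0.0 - 1.4))
assert isclose(4 * _x * _x + _y * _y, 100, abs_tol=1e-6)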
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
| 574
|
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str ):
def get_masked_lm_array(UpperCamelCase : str ):
A__ = F"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
A__ = tf.train.load_variable(UpperCamelCase , UpperCamelCase )
if "kernel" in name:
A__ = array.transpose()
return torch.from_numpy(UpperCamelCase )
def get_encoder_array(UpperCamelCase : str ):
A__ = F"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
A__ = tf.train.load_variable(UpperCamelCase , UpperCamelCase )
if "kernel" in name:
A__ = array.transpose()
return torch.from_numpy(UpperCamelCase )
def get_encoder_layer_array(UpperCamelCase : int , UpperCamelCase : str ):
A__ = F"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
A__ = tf.train.load_variable(UpperCamelCase , UpperCamelCase )
if "kernel" in name:
A__ = array.transpose()
return torch.from_numpy(UpperCamelCase )
def get_encoder_attention_layer_array(UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : int ):
A__ = F"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
A__ = tf.train.load_variable(UpperCamelCase , UpperCamelCase )
A__ = array.reshape(UpperCamelCase )
if "kernel" in name:
A__ = array.transpose()
return torch.from_numpy(UpperCamelCase )
print(F"""Loading model based on config from {config_path}...""" )
A__ = BertConfig.from_json_file(UpperCamelCase )
A__ = BertForMaskedLM(UpperCamelCase )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
A__ = model.bert.encoder.layer[layer_index]
# Self-attention
A__ = layer.attention.self
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_query_dense/kernel""" , self_attn.query.weight.data.shape )
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_query_dense/bias""" , self_attn.query.bias.data.shape )
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_key_dense/kernel""" , self_attn.key.weight.data.shape )
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_key_dense/bias""" , self_attn.key.bias.data.shape )
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_value_dense/kernel""" , self_attn.value.weight.data.shape )
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_value_dense/bias""" , self_attn.value.bias.data.shape )
# Self-attention Output
A__ = layer.attention.output
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_output_dense/kernel""" , self_output.dense.weight.data.shape )
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_output_dense/bias""" , self_output.dense.bias.data.shape )
A__ = get_encoder_layer_array(UpperCamelCase , """_attention_layer_norm/gamma""" )
A__ = get_encoder_layer_array(UpperCamelCase , """_attention_layer_norm/beta""" )
# Intermediate
A__ = layer.intermediate
A__ = get_encoder_layer_array(UpperCamelCase , """_intermediate_dense/kernel""" )
A__ = get_encoder_layer_array(UpperCamelCase , """_intermediate_dense/bias""" )
# Output
A__ = layer.output
A__ = get_encoder_layer_array(UpperCamelCase , """_output_dense/kernel""" )
A__ = get_encoder_layer_array(UpperCamelCase , """_output_dense/bias""" )
A__ = get_encoder_layer_array(UpperCamelCase , """_output_layer_norm/gamma""" )
A__ = get_encoder_layer_array(UpperCamelCase , """_output_layer_norm/beta""" )
# Embeddings
A__ = get_encoder_array("""_position_embedding_layer/embeddings""" )
A__ = get_encoder_array("""_type_embedding_layer/embeddings""" )
A__ = get_encoder_array("""_embedding_norm_layer/gamma""" )
A__ = get_encoder_array("""_embedding_norm_layer/beta""" )
# LM Head
A__ = model.cls.predictions.transform
A__ = get_masked_lm_array("""dense/kernel""" )
A__ = get_masked_lm_array("""dense/bias""" )
A__ = get_masked_lm_array("""layer_norm/gamma""" )
A__ = get_masked_lm_array("""layer_norm/beta""" )
A__ = get_masked_lm_array("""embedding_table""" )
# Pooling
A__ = BertPooler(config=UpperCamelCase )
A__ = get_encoder_array("""_pooler_layer/kernel""" )
A__ = get_encoder_array("""_pooler_layer/bias""" )
# Export final model
model.save_pretrained(UpperCamelCase )
# Integration test - should load without any errors ;)
A__ = BertForMaskedLM.from_pretrained(UpperCamelCase )
print(new_model.eval() )
print("""Model conversion was done sucessfully!""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
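
# Example invocation (a sketch; the script name and all paths are hypothetical):
#
#   python convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./token_dropping_ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./bert_token_dropping_pt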
| 574
| 1
|
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """
        Assign `val` to every position in [a, b]; lazy values are pushed down
        only when a node is visited.
        """
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over positions [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
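

# A compact usage sketch (independent of the demo below): range assignment followed
# by a range-max query, both O(log n) thanks to the lazy arrays above.
def _demo() -> None:
    tree = SegmentTree(5)
    tree.build(1, 1, 5, [3, 1, 4, 1, 5])
    tree.update(1, 1, 5, 2, 4, 7)  # assign 7 to positions 2..4
    print(tree.query(1, 1, 5, 1, 5))  # 7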
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
| 715
|
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 592
| 0
|