| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82 to 53.2k | int64 0 to 721 | stringlengths 91 to 41.9k | int64 0 to 699 | int64 0 to 1 |
"""Quantum half adder: add two classical bits on a simulated quantum circuit."""
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Return the measurement counts of a half-adder circuit for the two input bits."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use CNOTs to write the XOR of the inputs on qubit 2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use a CCX / Toffoli gate to write the AND of the inputs on qubit 3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
| 435
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {'vocab_file': 'spiece.model'}
UpperCAmelCase__ : Tuple = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
UpperCAmelCase__ : Optional[int] = {
'google/bigbird-roberta-base': 4_0_9_6,
'google/bigbird-roberta-large': 4_0_9_6,
'google/bigbird-base-trivia-itc': 4_0_9_6,
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES
__UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : List[str] = ['''input_ids''', '''attention_mask''']
__UpperCamelCase : List[int] = []
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="[SEP]" , SCREAMING_SNAKE_CASE__="[MASK]" , SCREAMING_SNAKE_CASE__="[CLS]" , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else bos_token
SCREAMING_SNAKE_CASE__ : Dict = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else eos_token
SCREAMING_SNAKE_CASE__ : Tuple = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else unk_token
SCREAMING_SNAKE_CASE__ : List[str] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else pad_token
SCREAMING_SNAKE_CASE__ : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cls_token
SCREAMING_SNAKE_CASE__ : int = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ : List[Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
SCREAMING_SNAKE_CASE__ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : List[Any] = vocab_file
SCREAMING_SNAKE_CASE__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE__ )
@property
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
return self.sp_model.get_piece_size()
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ : Tuple = None
return state
def __setstate__(self , SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
SCREAMING_SNAKE_CASE__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
return token
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : List[str] = """"""
SCREAMING_SNAKE_CASE__ : Any = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ )
return out_string.strip()
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , **SCREAMING_SNAKE_CASE__ , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = kwargs.pop("""use_source_tokenizer""" , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : str = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
sub_texts.append(SCREAMING_SNAKE_CASE__ )
else:
current_sub_text.append(SCREAMING_SNAKE_CASE__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(SCREAMING_SNAKE_CASE__ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
SCREAMING_SNAKE_CASE__ : Optional[Any] = re.sub(r""" (\[(MASK|SEP)\])""" , r"""\1""" , """ """.join(SCREAMING_SNAKE_CASE__ ) )
else:
SCREAMING_SNAKE_CASE__ : Any = """""".join(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.clean_up_tokenization(SCREAMING_SNAKE_CASE__ )
return clean_text
else:
return text
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , """wb""" ) as fi:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 223
| 0
|
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "cache"
_lowerCamelCase : List[str] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCamelCase : str = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_text_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : int = tmp_path / "cache"
_lowerCamelCase : Tuple = {"text": "string"}
_lowerCamelCase : Optional[Any] = features.copy() if features else default_expected_features
_lowerCamelCase : List[Any] = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCamelCase : Any = TextDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_text_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = tmp_path / "cache"
_lowerCamelCase : List[Any] = {"text": "string"}
_lowerCamelCase : Dict = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , split=_lowerCAmelCase ).read()
_check_text_dataset(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
if issubclass(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : Dict = text_path
elif issubclass(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : int = [text_path]
_lowerCamelCase : List[Any] = tmp_path / "cache"
_lowerCamelCase : Optional[int] = {"text": "string"}
_lowerCamelCase : Union[str, Any] = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_text_dataset(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : List[Any]=("train",) ):
"""simple docstring"""
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
for split in splits:
_lowerCamelCase : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = tmp_path / "cache"
_lowerCamelCase : Dict = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCamelCase : Dict = TextDatasetReader({"train": text_path} , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_text_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : List[Any] = tmp_path / "cache"
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
_lowerCamelCase : Optional[int] = {"text": "string"}
_lowerCamelCase : Optional[int] = features.copy() if features else default_expected_features
_lowerCamelCase : str = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCamelCase : Tuple = TextDatasetReader({"train": text_path} , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_text_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if split:
_lowerCamelCase : Tuple = {split: text_path}
else:
_lowerCamelCase : List[str] = "train"
_lowerCamelCase : List[str] = {"train": text_path, "test": text_path}
_lowerCamelCase : str = tmp_path / "cache"
_lowerCamelCase : int = {"text": "string"}
_lowerCamelCase : str = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_text_datasetdict(_lowerCAmelCase , _lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 703
|
"""Shuffle a list in place by repeatedly swapping two random positions."""
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle `data` in place and return it."""
    for _ in range(len(data)):
        # Pick two random indices and swap the elements they point to
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
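# Note: the textbook Fisher-Yates shuffle walks the list from the last index down and
# swaps each element with a randomly chosen earlier one; this variant simply performs
# len(data) random pair swaps, mutating and returning the same list object.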
| 11
| 0
|
def is_palindrome(n: int) -> bool:
    """Return True if `n` reads the same forwards and backwards."""
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    """Return `n` plus the number formed by reversing its digits."""
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    """Count the numbers below `limit` that do not produce a palindrome within
    50 reverse-and-add iterations (the Lychrel number candidates)."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
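# Worked example: 349 reaches a palindrome in three iterations (349 + 943 = 1292,
# 1292 + 2921 = 4213, 4213 + 3124 = 7337), so 349 is not counted by solution().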
| 36
|
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"""`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got """
F'''{test_file} instead.''' )
UpperCAmelCase = components[-1]
if not test_fn.endswith("""py""" ):
raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' )
if not test_fn.startswith("""test_modeling_""" ):
raise ValueError(
F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
UpperCAmelCase = components[:-1] + [test_fn.replace(""".py""" , """""" )]
UpperCAmelCase = """.""".join(_snake_case )
return test_module_path
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = get_module_path(_snake_case )
UpperCAmelCase = importlib.import_module(_snake_case )
return test_module
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = get_test_module(_snake_case )
for attr in dir(_snake_case ):
if attr.endswith("""ModelTester""" ):
tester_classes.append(getattr(_snake_case , _snake_case ) )
# sort with class names
return sorted(_snake_case , key=lambda _snake_case : x.__name__ )
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = get_test_module(_snake_case )
for attr in dir(_snake_case ):
UpperCAmelCase = getattr(_snake_case , _snake_case )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
UpperCAmelCase = getattr(_snake_case , """all_model_classes""" , [] )
if len(_snake_case ) > 0:
test_classes.append(_snake_case )
# sort with class names
return sorted(_snake_case , key=lambda _snake_case : x.__name__ )
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = get_test_classes(_snake_case )
UpperCAmelCase = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(_snake_case , key=lambda _snake_case : x.__name__ )
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = test_class()
if hasattr(_snake_case , """setUp""" ):
test.setUp()
UpperCAmelCase = None
if hasattr(_snake_case , """model_tester""" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
UpperCAmelCase = test.model_tester.__class__
return model_tester
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = get_test_classes(_snake_case )
UpperCAmelCase = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(_snake_case )
# sort with class names
return sorted(_snake_case , key=lambda _snake_case : x.__name__ )
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = get_test_classes_for_model(_snake_case , _snake_case )
UpperCAmelCase = []
for test_class in test_classes:
UpperCAmelCase = get_model_tester_from_test_class(_snake_case )
if tester_class is not None:
tester_classes.append(_snake_case )
# sort with class names
return sorted(_snake_case , key=lambda _snake_case : x.__name__ )
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = get_test_classes(_snake_case )
UpperCAmelCase = {test_class: get_model_tester_from_test_class(_snake_case ) for test_class in test_classes}
return test_tester_mapping
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = get_model_classes(_snake_case )
UpperCAmelCase = {
model_class: get_test_classes_for_model(_snake_case , _snake_case ) for model_class in model_classes
}
return model_test_mapping
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = get_model_classes(_snake_case )
UpperCAmelCase = {
model_class: get_tester_classes_for_model(_snake_case , _snake_case ) for model_class in model_classes
}
return model_to_tester_mapping
def _a ( _snake_case ):
"""simple docstring"""
if isinstance(_snake_case , _snake_case ):
return o
elif isinstance(_snake_case , _snake_case ):
return o.__name__
elif isinstance(_snake_case , (list, tuple) ):
return [to_json(_snake_case ) for x in o]
elif isinstance(_snake_case , _snake_case ):
return {to_json(_snake_case ): to_json(_snake_case ) for k, v in o.items()}
else:
return o
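# Example (illustrative path; any tests/models/*/test_modeling_*.py file works):
#     testers = get_tester_classes("tests/models/bert/test_modeling_bert.py")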
| 341
| 0
|
from __future__ import annotations

from random import random


class Node:
    """A treap node: a binary search tree by value and a heap by random priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into two treaps: values <= `value` and values > `value`."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps in which every value of `left` is <= every value of `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert `value` into the treap rooted at `root` and return the new root."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Remove all nodes holding `value` and return the new root."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print the treap values in sorted (in-order) order."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply a whitespace-separated list of "+value" / "-value" commands to the treap."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """Small interactive loop for experimenting with the treap."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good bye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
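# Example (non-interactive use of the helpers above):
#     root = None
#     for value in (5, 3, 9):
#         root = insert(root, value)
#     inorder(root)  # prints: 3,5,9,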
| 713
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = "Wav2Vec2FeatureExtractor"
_A = "AutoTokenizer"
def __init__( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(snake_case__ , snake_case__ )
A =self.feature_extractor
A =False
@classmethod
def _a ( cls : List[str] , snake_case__ : Union[str, Any] , **snake_case__ : Dict ):
"""simple docstring"""
try:
return super().from_pretrained(snake_case__ , **snake_case__ )
except OSError:
warnings.warn(
f'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: " , snake_case__ , )
A =WavaVecaFeatureExtractor.from_pretrained(snake_case__ , **snake_case__ )
A =WavaVecaCTCTokenizer.from_pretrained(snake_case__ , **snake_case__ )
return cls(feature_extractor=snake_case__ , tokenizer=snake_case__ )
def __call__( self : Optional[Any] , *snake_case__ : Union[str, Any] , **snake_case__ : Optional[int] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*snake_case__ , **snake_case__ )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
A =kwargs.pop("raw_speech" )
else:
A =kwargs.pop("audio" , snake_case__ )
A =kwargs.pop("sampling_rate" , snake_case__ )
A =kwargs.pop("text" , snake_case__ )
if len(snake_case__ ) > 0:
A =args[0]
A =args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
A =self.feature_extractor(snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ )
if text is not None:
A =self.tokenizer(snake_case__ , **snake_case__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A =encodings["input_ids"]
return inputs
def _a ( self : Tuple , *snake_case__ : Union[str, Any] , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*snake_case__ , **snake_case__ )
A =kwargs.pop("input_features" , snake_case__ )
A =kwargs.pop("labels" , snake_case__ )
if len(snake_case__ ) > 0:
A =args[0]
A =args[1:]
if input_features is not None:
A =self.feature_extractor.pad(snake_case__ , *snake_case__ , **snake_case__ )
if labels is not None:
A =self.tokenizer.pad(snake_case__ , **snake_case__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
A =labels["input_ids"]
return input_features
def _a ( self : List[str] , *snake_case__ : Dict , **snake_case__ : int ):
"""simple docstring"""
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def _a ( self : List[str] , *snake_case__ : Optional[int] , **snake_case__ : List[Any] ):
"""simple docstring"""
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@contextmanager
def _a ( self : int ):
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
A =True
A =self.tokenizer
yield
A =self.feature_extractor
A =False
| 689
| 0
|
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """A logging adapter that only emits records on the processes that should log."""

    @staticmethod
    def _should_log(main_process_only):
        """Check whether this process should emit the record."""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """Delegate to the wrapped logger after checking whether this process should log.

        Accepts two extra keyword arguments: `main_process_only` (default True) to log
        only on the main process, and `in_order` (default False) to log on every
        process, one process at a time.
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """Return a `MultiProcessAdapter` wrapped around `logging.getLogger(name)`.

    If `log_level` is not passed, the `ACCELERATE_LOG_LEVEL` environment variable is used.
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
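# Typical usage (assumes an `Accelerator()` or `PartialState()` has already been created):
#     logger = get_logger(__name__)
#     logger.info("My log message", main_process_only=True)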
| 663
|
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_A = logging.getLogger(__name__)
_A = "pytorch_model.bin"
@dataclasses.dataclass
class __UpperCAmelCase :
"""simple docstring"""
_snake_case : str = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
_snake_case : Optional[str] = dataclasses.field(
default=snake_case__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class __UpperCAmelCase :
"""simple docstring"""
_snake_case : str = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
_snake_case : str = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
_snake_case : Optional[str] = dataclasses.field(
default=snake_case__ , metadata={'help': 'A csv or a json file containing the validation data.'} )
_snake_case : Optional[str] = dataclasses.field(
default=snake_case__ , metadata={'help': 'The name of the task to train on.'} , )
_snake_case : Optional[List[str]] = dataclasses.field(
default=snake_case__ , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class __UpperCAmelCase :
"""simple docstring"""
_snake_case : str = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
_snake_case : Optional[str] = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
_snake_case : Optional[str] = dataclasses.field(
default='no' , metadata={
'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
} , )
_snake_case : Optional[int] = dataclasses.field(
default=1_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
_snake_case : Optional[float] = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
_snake_case : Optional[bool] = dataclasses.field(
default=snake_case__ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
_snake_case : Optional[bool] = dataclasses.field(
default=snake_case__ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
_snake_case : Optional[bool] = dataclasses.field(
default=snake_case__ , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
_snake_case : Optional[float] = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
_snake_case : Optional[int] = dataclasses.field(
default=1_0_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
_snake_case : Optional[int] = dataclasses.field(
default=snake_case__ , metadata={'help': 'Random seed for initialization.'} , )
def lowercase (_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ) -> str:
'''simple docstring'''
__UpperCamelCase = datasets.concatenate_datasets([infer_input, infer_output] ,axis=1 )
if args.do_filter_by_confidence:
__UpperCamelCase = dataset.filter(lambda _snake_case : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
__UpperCamelCase = int(eval_result * len(_snake_case ) )
print(_snake_case )
__UpperCamelCase = dataset.sort("probability" ,reverse=_snake_case )
__UpperCamelCase = dataset.select(range(_snake_case ) )
__UpperCamelCase = dataset.remove_columns(["label", "probability"] )
__UpperCamelCase = dataset.rename_column("prediction" ,"label" )
__UpperCamelCase = dataset.map(lambda _snake_case : {"label": idalabel[example["label"]]} )
__UpperCamelCase = dataset.shuffle(seed=args.seed )
__UpperCamelCase = os.path.join(_snake_case ,f"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(_snake_case ,index=_snake_case )
else:
dataset.to_json(_snake_case )
def lowercase (_snake_case ,_snake_case ,_snake_case ,_snake_case ,**_snake_case ) -> List[str]:
'''simple docstring'''
__UpperCamelCase = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,level=logging.INFO ,)
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
__UpperCamelCase = STModelArguments(model_name_or_path=_snake_case )
__UpperCamelCase = STDataArguments(train_file=_snake_case ,infer_file=_snake_case )
__UpperCamelCase = STTrainingArguments(output_dir=_snake_case )
__UpperCamelCase = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_snake_case ).items():
setattr(_snake_case ,_snake_case ,_snake_case )
for key, value in kwargs.items():
if hasattr(_snake_case ,_snake_case ):
setattr(_snake_case ,_snake_case ,_snake_case )
# Sanity checks
__UpperCamelCase = {}
__UpperCamelCase = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
__UpperCamelCase = args.train_file
__UpperCamelCase = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
__UpperCamelCase = args.eval_file
for key in data_files:
__UpperCamelCase = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
__UpperCamelCase = extension
else:
assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
__UpperCamelCase = f"""{args.output_dir}/self-train_iter-{{}}""".format
__UpperCamelCase = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir ,exist_ok=_snake_case )
os.makedirs(_snake_case ,exist_ok=_snake_case )
accelerator.wait_for_everyone()
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = 0
__UpperCamelCase = False
# Show the progress bar
__UpperCamelCase = tqdm(range(args.max_selftrain_iterations ) ,disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 ,int(args.max_selftrain_iterations ) ):
__UpperCamelCase = data_dir_format(_snake_case )
assert os.path.exists(_snake_case )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
__UpperCamelCase = os.path.join(_snake_case ,"stage-1" )
__UpperCamelCase = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_snake_case ,_snake_case ):
arguments_dict.update({key: value} )
__UpperCamelCase = os.path.join(_snake_case ,"best-checkpoint" ,_snake_case )
if os.path.exists(_snake_case ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." ,_snake_case ,_snake_case ,)
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" ,_snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info("Self-training job completed: iteration: %d, stage: 1." ,_snake_case )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
__UpperCamelCase = os.path.join(_snake_case ,"best-checkpoint" )
__UpperCamelCase = os.path.join(_snake_case ,"stage-2" )
# Update arguments_dict
__UpperCamelCase = model_path
__UpperCamelCase = data_files["train"]
__UpperCamelCase = current_output_dir
__UpperCamelCase = os.path.join(_snake_case ,"best-checkpoint" ,_snake_case )
if os.path.exists(_snake_case ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." ,_snake_case ,_snake_case ,)
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" ,_snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info("Self-training job completed: iteration: %d, stage: 2." ,_snake_case )
__UpperCamelCase = iteration
__UpperCamelCase = data_dir_format(iteration + 1 )
__UpperCamelCase = AutoConfig.from_pretrained(os.path.join(_snake_case ,"best-checkpoint" ) )
__UpperCamelCase = config.idalabel
__UpperCamelCase = os.path.join(_snake_case ,"eval_results_best-checkpoint.json" )
__UpperCamelCase = os.path.join(_snake_case ,"test_results_best-checkpoint.json" )
assert os.path.exists(_snake_case )
with open(_snake_case ,"r" ) as f:
__UpperCamelCase = float(json.load(_snake_case )[args.eval_metric] )
__UpperCamelCase = os.path.join(_snake_case ,"infer_output_best-checkpoint.csv" )
assert os.path.exists(_snake_case )
# Loading the dataset from local csv or json files.
__UpperCamelCase = load_dataset(args.data_file_extension ,data_files={"data": data_files["infer"]} )["data"]
__UpperCamelCase = load_dataset("csv" ,data_files={"data": infer_output_file} )["data"]
if accelerator.is_main_process:
os.makedirs(_snake_case ,exist_ok=_snake_case )
shutil.copy(_snake_case ,os.path.join(_snake_case ,f"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(_snake_case ):
shutil.copy(_snake_case ,os.path.join(_snake_case ,f"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case )
accelerator.wait_for_everyone()
__UpperCamelCase = os.path.join(_snake_case ,f"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
__UpperCamelCase = eval_result
if best_iteration is None:
__UpperCamelCase = new_iteration
__UpperCamelCase = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
__UpperCamelCase = new_iteration
__UpperCamelCase = new_eval_result
__UpperCamelCase = 0
else:
if new_eval_result == best_eval_result:
__UpperCamelCase = new_iteration
__UpperCamelCase = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
__UpperCamelCase = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" ,_snake_case )
logger.info("Best evaluation result: %s = %f" ,args.eval_metric ,_snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case ,f"""eval_results_iter-{iteration}.json""" ) ,os.path.join(_snake_case ,"eval_results_best-iteration.json" ) ,)
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" ,args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" ,args.eval_metric ,_snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case ,f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) ,os.path.join(_snake_case ,"eval_results_best-iteration.json" ) ,)
| 505
| 0
|
"""Malus's law: the intensity transmitted by a polarizer is I = I0 * cos(theta)**2."""
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of the allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
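# Example: malus_law(100.0, 60.0) is about 25.0, since cos(60 degrees) squared is 0.25.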
| 718
|
"""Nagel-Schreckenberg style cellular automaton for traffic on a circular highway."""
from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    """Build the initial highway: -1 marks an empty cell, any other value is the
    speed of the car occupying that cell."""
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    """Count the empty cells between the car at `car_index` and the next car."""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway: wrap around to the start
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    """Compute the next speed of every car on the highway."""
    number_of_cells = len(highway_now)
    # Before the calculations, the next highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(
    highway: list, number_of_update: int, probability: float, max_speed: int
) -> list:
    """Run `number_of_update` steps, appending each new highway state to `highway`."""
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
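# Example (illustrative parameters): a 30-cell ring with a car every 4 cells at
# initial speed 1, evolved for 2 steps with braking probability 0.1 and max speed 5:
#     simulate(construct_highway(30, 4, 1), 2, 0.1, 5)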
| 428
| 0
|
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class __lowercase (_lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase = PriorTransformer
_UpperCAmelCase = """hidden_states"""
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = 4
SCREAMING_SNAKE_CASE_ : Optional[int] = 8
SCREAMING_SNAKE_CASE_ : int = 7
SCREAMING_SNAKE_CASE_ : Dict = floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def UpperCamelCase__ ( self , lowerCAmelCase__=0 ):
"""simple docstring"""
torch.manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 4
SCREAMING_SNAKE_CASE_ : List[Any] = 8
SCREAMING_SNAKE_CASE_ : Tuple = 7
SCREAMING_SNAKE_CASE_ : Any = torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (4, 8)
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (4, 8)
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = PriorTransformer.from_pretrained(
'hf-internal-testing/prior-dummy' , output_loading_info=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_class(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : Optional[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : str = ['hidden_states', 'timestep']
self.assertListEqual(arg_names[:2] , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' )
SCREAMING_SNAKE_CASE_ : int = model.to(lowerCAmelCase__ )
if hasattr(lowerCAmelCase__ , 'set_default_attn_processor' ):
model.set_default_attn_processor()
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_seed_input()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Dict = model(**lowerCAmelCase__ )[0]
SCREAMING_SNAKE_CASE_ : Dict = output[0, :5].flatten().cpu()
print(lowerCAmelCase__ )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
SCREAMING_SNAKE_CASE_ : str = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
self.assertTrue(torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , rtol=1E-2 ) )
@slow
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self , lowerCAmelCase__=1 , lowerCAmelCase__=7_6_8 , lowerCAmelCase__=7_7 , lowerCAmelCase__=0 ):
"""simple docstring"""
torch.manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = batch_size
SCREAMING_SNAKE_CASE_ : Any = embedding_dim
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_embeddings
SCREAMING_SNAKE_CASE_ : int = torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[1_3, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[3_7, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' )
model.to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_dummy_seed_input(seed=lowerCAmelCase__ )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] = model(**lowerCAmelCase__ )[0]
assert list(sample.shape ) == [1, 7_6_8]
SCREAMING_SNAKE_CASE_ : Optional[Any] = sample[0, :8].flatten().cpu()
print(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(lowerCAmelCase__ )
assert torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 )
| 101
|
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_UpperCamelCase : Optional[Any] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
_UpperCamelCase : Optional[int] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_UpperCamelCase : Union[str, Any] = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_UpperCamelCase : int = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_UpperCamelCase : List[str] = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_UpperCamelCase : List[Any] = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def __snake_case ( lowerCAmelCase : int ):
__UpperCAmelCase = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , lowerCAmelCase )
return [m.group(0 ) for m in matches]
def __snake_case ( ):
__UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__UpperCAmelCase = {
config.replace('Config' , '' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
__UpperCAmelCase = collections.defaultdict(lowerCAmelCase )
__UpperCAmelCase = collections.defaultdict(lowerCAmelCase )
__UpperCAmelCase = collections.defaultdict(lowerCAmelCase )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(lowerCAmelCase ):
__UpperCAmelCase = None
if _re_tf_models.match(lowerCAmelCase ) is not None:
__UpperCAmelCase = tf_models
__UpperCAmelCase = _re_tf_models.match(lowerCAmelCase ).groups()[0]
elif _re_flax_models.match(lowerCAmelCase ) is not None:
__UpperCAmelCase = flax_models
__UpperCAmelCase = _re_flax_models.match(lowerCAmelCase ).groups()[0]
elif _re_pt_models.match(lowerCAmelCase ) is not None:
__UpperCAmelCase = pt_models
__UpperCAmelCase = _re_pt_models.match(lowerCAmelCase ).groups()[0]
if lookup_dict is not None:
while len(lowerCAmelCase ) > 0:
if attr_name in model_prefix_to_model_type:
__UpperCAmelCase = True
break
# Try again after removing the last word in the name
__UpperCAmelCase = ''.join(camel_case_split(lowerCAmelCase )[:-1] )
__UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
__UpperCAmelCase = list(lowerCAmelCase )
all_models.sort()
__UpperCAmelCase = {'model_type': all_models}
__UpperCAmelCase = [pt_models[t] for t in all_models]
__UpperCAmelCase = [tf_models[t] for t in all_models]
__UpperCAmelCase = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to pick the right processing class for each model type.
__UpperCAmelCase = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
__UpperCAmelCase = 'AutoProcessor'
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
__UpperCAmelCase = 'AutoTokenizer'
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
__UpperCAmelCase = 'AutoFeatureExtractor'
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
__UpperCAmelCase = 'AutoTokenizer'
__UpperCAmelCase = [processors[t] for t in all_models]
return pd.DataFrame(lowerCAmelCase )
def __snake_case ( lowerCAmelCase : Any ):
__UpperCAmelCase = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
__UpperCAmelCase = [model_mapping, F"""TF_{model_mapping}""", F"""FLAX_{model_mapping}"""]
__UpperCAmelCase = [auto_class, F"""TF_{auto_class}""", F"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
# The type of pipeline may not exist in this framework
if not hasattr(lowerCAmelCase , lowerCAmelCase ):
continue
# First extract all model_names
__UpperCAmelCase = []
for name in getattr(lowerCAmelCase , lowerCAmelCase ).values():
if isinstance(lowerCAmelCase , lowerCAmelCase ):
model_names.append(lowerCAmelCase )
else:
model_names.extend(list(lowerCAmelCase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def __snake_case ( lowerCAmelCase : str , lowerCAmelCase : int ):
__UpperCAmelCase = get_frameworks_table()
__UpperCAmelCase = Dataset.from_pandas(lowerCAmelCase )
__UpperCAmelCase = hf_hub_download(
'huggingface/transformers-metadata' , 'pipeline_tags.json' , repo_type='dataset' , token=lowerCAmelCase )
__UpperCAmelCase = Dataset.from_json(lowerCAmelCase )
__UpperCAmelCase = {
tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
for i in range(len(lowerCAmelCase ) )
}
__UpperCAmelCase = update_pipeline_and_auto_class_table(lowerCAmelCase )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
__UpperCAmelCase = sorted(table.keys() )
__UpperCAmelCase = pd.DataFrame(
{
'model_class': model_classes,
'pipeline_tag': [table[m][0] for m in model_classes],
'auto_class': [table[m][1] for m in model_classes],
} )
__UpperCAmelCase = Dataset.from_pandas(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(lowerCAmelCase , 'frameworks.json' ) )
tags_dataset.to_json(os.path.join(lowerCAmelCase , 'pipeline_tags.json' ) )
if commit_sha is not None:
__UpperCAmelCase = (
F"""Update with commit {commit_sha}\n\nSee: """
F"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
__UpperCAmelCase = 'Update'
upload_folder(
repo_id='huggingface/transformers-metadata' , folder_path=lowerCAmelCase , repo_type='dataset' , token=lowerCAmelCase , commit_message=lowerCAmelCase , )
def __snake_case ( ):
__UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
__UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS
__UpperCAmelCase = []
for key in pipeline_tasks:
if key not in in_table:
__UpperCAmelCase = pipeline_tasks[key]['pt']
if isinstance(lowerCAmelCase , (list, tuple) ):
__UpperCAmelCase = model[0]
__UpperCAmelCase = model.__name__
if model not in in_table.values():
missing.append(lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
__UpperCAmelCase = ', '.join(lowerCAmelCase )
raise ValueError(
'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
F"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
_UpperCamelCase : Optional[Any] = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
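# Example invocation (illustrative; the flag names are taken from the argparse setup above):
#   python update_metadata.py --token <hub_token> --commit_sha <sha>
#   python update_metadata.py --check-only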
| 396
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
__lowerCamelCase : int = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''rwkv'''
a__ = {'''max_position_embeddings''': '''context_length'''}
def __init__( self :Optional[int] , lowerCAmelCase__ :Any=50_277 , lowerCAmelCase__ :Optional[int]=1_024 , lowerCAmelCase__ :Tuple=4_096 , lowerCAmelCase__ :Optional[int]=32 , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :List[str]=1E-5 , lowerCAmelCase__ :Any=0 , lowerCAmelCase__ :int=0 , lowerCAmelCase__ :Optional[Any]=6 , lowerCAmelCase__ :List[Any]=False , lowerCAmelCase__ :str=True , **lowerCAmelCase__ :Optional[int] , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[str] = vocab_size
snake_case_ : str = context_length
snake_case_ : int = hidden_size
snake_case_ : int = num_hidden_layers
snake_case_ : Optional[int] = attention_hidden_size if attention_hidden_size is not None else hidden_size
snake_case_ : Any = intermediate_size if intermediate_size is not None else 4 * hidden_size
snake_case_ : Tuple = layer_norm_epsilon
snake_case_ : str = rescale_every
snake_case_ : Dict = use_cache
snake_case_ : Union[str, Any] = bos_token_id
snake_case_ : int = eos_token_id
super().__init__(
tie_word_embeddings=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
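# Illustrative note (not part of the original config): with the fallbacks above, a config
# created with only hidden_size=1024 ends up with attention_hidden_size=1024 and
# intermediate_size=4096, since both default to (multiples of) hidden_size when left unset.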
| 656
|
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = torch.nn.Linear(10 , 10 )
snake_case_ : Dict = torch.optim.SGD(model.parameters() , 0.1 )
snake_case_ : Tuple = Accelerator()
snake_case_ : Optional[Any] = accelerator.prepare(lowerCAmelCase__ )
try:
pickle.loads(pickle.dumps(lowerCAmelCase__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 656
| 1
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: Tuple ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase__ ( self: Dict ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ =UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def lowerCAmelCase__ ( self: int ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ =UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def lowerCAmelCase__ ( self: Optional[int] ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ =AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
UpperCAmelCase_ =UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ ="cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ =Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
UpperCAmelCase_ =DDPMScheduler()
UpperCAmelCase_ =AudioDiffusionPipeline(vqvae=_lowerCAmelCase , unet=self.dummy_unet , mel=_lowerCAmelCase , scheduler=_lowerCAmelCase )
UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =torch.Generator(device=_lowerCAmelCase ).manual_seed(42 )
UpperCAmelCase_ =pipe(generator=_lowerCAmelCase , steps=4 )
UpperCAmelCase_ =output.audios[0]
UpperCAmelCase_ =output.images[0]
UpperCAmelCase_ =torch.Generator(device=_lowerCAmelCase ).manual_seed(42 )
UpperCAmelCase_ =pipe(generator=_lowerCAmelCase , steps=4 , return_dict=_lowerCAmelCase )
UpperCAmelCase_ =output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
UpperCAmelCase_ =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
UpperCAmelCase_ =np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
UpperCAmelCase_ =np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
UpperCAmelCase_ =Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
UpperCAmelCase_ =DDIMScheduler()
UpperCAmelCase_ =self.dummy_vqvae_and_unet
UpperCAmelCase_ =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_lowerCAmelCase , scheduler=_lowerCAmelCase )
UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
np.random.seed(0 )
UpperCAmelCase_ =np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
UpperCAmelCase_ =torch.Generator(device=_lowerCAmelCase ).manual_seed(42 )
UpperCAmelCase_ =pipe(raw_audio=_lowerCAmelCase , generator=_lowerCAmelCase , start_step=5 , steps=10 )
UpperCAmelCase_ =output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
UpperCAmelCase_ =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
UpperCAmelCase_ =np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
UpperCAmelCase_ =self.dummy_unet_condition
UpperCAmelCase_ =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_lowerCAmelCase , mel=_lowerCAmelCase , scheduler=_lowerCAmelCase )
UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
np.random.seed(0 )
UpperCAmelCase_ =torch.rand((1, 1, 10) )
UpperCAmelCase_ =pipe(generator=_lowerCAmelCase , encoding=_lowerCAmelCase )
UpperCAmelCase_ =output.images[0]
UpperCAmelCase_ =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
UpperCAmelCase_ =np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: List[Any] ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =torch_device
UpperCAmelCase_ =DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =torch.Generator(device=_lowerCAmelCase ).manual_seed(42 )
UpperCAmelCase_ =pipe(generator=_lowerCAmelCase )
UpperCAmelCase_ =output.audios[0]
UpperCAmelCase_ =output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
UpperCAmelCase_ =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
UpperCAmelCase_ =np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 54
|
"""simple docstring"""
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
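# Illustrative examples (assuming the reconstructed decimal_to_any above):
#   decimal_to_any(5, 2)    -> "101"
#   decimal_to_any(255, 16) -> "FF"   (digits >= 10 are looked up in ALPHABET_VALUES)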
| 506
| 0
|
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"""#.*""", """""", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = """\n""".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("""utf-8""")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_UpperCAmelCase = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_UpperCAmelCase = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_UpperCAmelCase = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_UpperCAmelCase = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
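# Minimal sketch (an assumption, not part of the original module): given a data file name,
# look up which packaged builder module and default reader options the mappings above imply.
def _guess_module_from_extension(filename):
    suffix = "." + filename.rsplit(".", 1)[-1]
    return _EXTENSION_TO_MODULE.get(suffix)

# e.g. _guess_module_from_extension("table.tsv") -> ('csv', {'sep': '\t'})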
| 700
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
_UpperCAmelCase = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_UpperCAmelCase = 'UperNetConfig'
class snake_case_ ( nn.Module ):
def __init__( self : List[str] , _snake_case : int , _snake_case : int , _snake_case : Union[int, Tuple[int, int]] , _snake_case : Union[int, Tuple[int, int], str] = 0 , _snake_case : bool = False , _snake_case : Union[int, Tuple[int, int]] = 1 , )->None:
'''simple docstring'''
super().__init__()
__lowerCAmelCase : Tuple = nn.Convad(
in_channels=_snake_case , out_channels=_snake_case , kernel_size=_snake_case , padding=_snake_case , bias=_snake_case , dilation=_snake_case , )
__lowerCAmelCase : Any = nn.BatchNormad(_snake_case )
__lowerCAmelCase : Union[str, Any] = nn.ReLU()
def UpperCAmelCase__ ( self : Tuple , _snake_case : torch.Tensor )->torch.Tensor:
'''simple docstring'''
__lowerCAmelCase : List[str] = self.conv(_snake_case )
__lowerCAmelCase : Any = self.batch_norm(_snake_case )
__lowerCAmelCase : List[Any] = self.activation(_snake_case )
return output
class snake_case_ ( nn.Module ):
def __init__( self : Dict , _snake_case : int , _snake_case : int , _snake_case : int )->None:
'''simple docstring'''
super().__init__()
__lowerCAmelCase : int = [
nn.AdaptiveAvgPoolad(_snake_case ),
UperNetConvModule(_snake_case , _snake_case , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(_snake_case ) , _snake_case )
def UpperCAmelCase__ ( self : List[str] , _snake_case : torch.Tensor )->torch.Tensor:
'''simple docstring'''
__lowerCAmelCase : List[Any] = input
for layer in self.layers:
__lowerCAmelCase : List[Any] = layer(_snake_case )
return hidden_state
class snake_case_ ( nn.Module ):
def __init__( self : Union[str, Any] , _snake_case : Tuple[int, ...] , _snake_case : int , _snake_case : int , _snake_case : bool )->None:
'''simple docstring'''
super().__init__()
__lowerCAmelCase : str = pool_scales
__lowerCAmelCase : List[Any] = align_corners
__lowerCAmelCase : Tuple = in_channels
__lowerCAmelCase : Optional[int] = channels
__lowerCAmelCase : Optional[int] = []
for i, pool_scale in enumerate(_snake_case ):
__lowerCAmelCase : List[str] = UperNetPyramidPoolingBlock(pool_scale=_snake_case , in_channels=_snake_case , channels=_snake_case )
self.blocks.append(_snake_case )
self.add_module(str(_snake_case ) , _snake_case )
def UpperCAmelCase__ ( self : List[Any] , _snake_case : torch.Tensor )->List[torch.Tensor]:
'''simple docstring'''
__lowerCAmelCase : Any = []
for ppm in self.blocks:
__lowerCAmelCase : Dict = ppm(_snake_case )
__lowerCAmelCase : Any = nn.functional.interpolate(
_snake_case , size=x.size()[2:] , mode="""bilinear""" , align_corners=self.align_corners )
ppm_outs.append(_snake_case )
return ppm_outs
class snake_case_ ( nn.Module ):
def __init__( self : Optional[Any] , _snake_case : Optional[int] , _snake_case : int )->Optional[Any]:
'''simple docstring'''
super().__init__()
__lowerCAmelCase : List[Any] = config
__lowerCAmelCase : Union[str, Any] = config.pool_scales # e.g. (1, 2, 3, 6)
__lowerCAmelCase : str = in_channels
__lowerCAmelCase : str = config.hidden_size
__lowerCAmelCase : Union[str, Any] = False
__lowerCAmelCase : List[str] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
__lowerCAmelCase : Any = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
__lowerCAmelCase : List[Any] = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
__lowerCAmelCase : Any = nn.ModuleList()
__lowerCAmelCase : List[Any] = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
__lowerCAmelCase : str = UperNetConvModule(_snake_case , self.channels , kernel_size=1 )
__lowerCAmelCase : Any = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(_snake_case )
self.fpn_convs.append(_snake_case )
__lowerCAmelCase : int = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def UpperCAmelCase__ ( self : Optional[Any] )->Any:
'''simple docstring'''
self.apply(self._init_weights )
def UpperCAmelCase__ ( self : Any , _snake_case : Optional[Any] )->Union[str, Any]:
'''simple docstring'''
if isinstance(_snake_case , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def UpperCAmelCase__ ( self : Tuple , _snake_case : int )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = inputs[-1]
__lowerCAmelCase : Optional[Any] = [x]
psp_outs.extend(self.psp_modules(_snake_case ) )
__lowerCAmelCase : Optional[int] = torch.cat(_snake_case , dim=1 )
__lowerCAmelCase : List[Any] = self.bottleneck(_snake_case )
return output
def UpperCAmelCase__ ( self : List[str] , _snake_case : torch.Tensor )->torch.Tensor:
'''simple docstring'''
__lowerCAmelCase : str = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(_snake_case ) )
# build top-down path
__lowerCAmelCase : Union[str, Any] = len(_snake_case )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
__lowerCAmelCase : Optional[Any] = laterals[i - 1].shape[2:]
__lowerCAmelCase : Dict = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=_snake_case , mode="""bilinear""" , align_corners=self.align_corners )
# build outputs
__lowerCAmelCase : Union[str, Any] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
__lowerCAmelCase : Optional[Any] = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="""bilinear""" , align_corners=self.align_corners )
__lowerCAmelCase : Dict = torch.cat(_snake_case , dim=1 )
__lowerCAmelCase : Optional[int] = self.fpn_bottleneck(_snake_case )
__lowerCAmelCase : List[str] = self.classifier(_snake_case )
return output
class snake_case_ ( nn.Module ):
def __init__( self : Dict , _snake_case : List[Any] , _snake_case : int = 2 , _snake_case : int = 3 , _snake_case : Union[int, Tuple[int, int]] = 1 )->None:
'''simple docstring'''
super().__init__()
__lowerCAmelCase : List[str] = config
__lowerCAmelCase : Optional[int] = config.auxiliary_in_channels
__lowerCAmelCase : Tuple = config.auxiliary_channels
__lowerCAmelCase : Any = config.auxiliary_num_convs
__lowerCAmelCase : List[Any] = config.auxiliary_concat_input
__lowerCAmelCase : Optional[Any] = in_index
__lowerCAmelCase : Union[str, Any] = (kernel_size // 2) * dilation
__lowerCAmelCase : List[str] = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=_snake_case , padding=_snake_case , dilation=_snake_case ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=_snake_case , padding=_snake_case , dilation=_snake_case ) )
if self.num_convs == 0:
__lowerCAmelCase : Union[str, Any] = nn.Identity()
else:
__lowerCAmelCase : int = nn.Sequential(*_snake_case )
if self.concat_input:
__lowerCAmelCase : Dict = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=_snake_case , padding=kernel_size // 2 )
__lowerCAmelCase : List[str] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def UpperCAmelCase__ ( self : List[Any] )->Tuple:
'''simple docstring'''
self.apply(self._init_weights )
def UpperCAmelCase__ ( self : Any , _snake_case : Any )->int:
'''simple docstring'''
if isinstance(_snake_case , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def UpperCAmelCase__ ( self : List[Any] , _snake_case : torch.Tensor )->torch.Tensor:
'''simple docstring'''
__lowerCAmelCase : List[str] = encoder_hidden_states[self.in_index]
__lowerCAmelCase : List[str] = self.convs(_snake_case )
if self.concat_input:
__lowerCAmelCase : Tuple = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
__lowerCAmelCase : Any = self.classifier(_snake_case )
return output
class snake_case_ ( __lowercase ):
A_ = UperNetConfig
A_ = 'pixel_values'
A_ = True
def UpperCAmelCase__ ( self : Optional[Any] , _snake_case : List[str] )->Optional[int]:
'''simple docstring'''
if isinstance(_snake_case , _snake_case ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def UpperCAmelCase__ ( self : Any )->Tuple:
'''simple docstring'''
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def UpperCAmelCase__ ( self : Tuple , _snake_case : Union[str, Any] , _snake_case : List[Any]=False )->str:
'''simple docstring'''
if isinstance(_snake_case , _snake_case ):
__lowerCAmelCase : List[Any] = value
_UpperCAmelCase = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
_UpperCAmelCase = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.' ,__lowercase ,)
class snake_case_ ( __lowercase ):
def __init__( self : str , _snake_case : Optional[int] )->Union[str, Any]:
'''simple docstring'''
super().__init__(_snake_case )
__lowerCAmelCase : List[Any] = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
__lowerCAmelCase : Tuple = UperNetHead(_snake_case , in_channels=self.backbone.channels )
__lowerCAmelCase : str = UperNetFCNHead(_snake_case ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("""batch_size, sequence_length""" ) )
@replace_return_docstrings(output_type=_snake_case , config_class=_CONFIG_FOR_DOC )
def UpperCAmelCase__ ( self : Tuple , _snake_case : Optional[torch.Tensor] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[torch.Tensor] = None , _snake_case : Optional[bool] = None , )->Union[tuple, SemanticSegmenterOutput]:
'''simple docstring'''
__lowerCAmelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase : Optional[int] = output_attentions if output_attentions is not None else self.config.output_attentions
__lowerCAmelCase : Any = self.backbone.forward_with_filtered_kwargs(
_snake_case , output_hidden_states=_snake_case , output_attentions=_snake_case )
__lowerCAmelCase : int = outputs.feature_maps
__lowerCAmelCase : Union[str, Any] = self.decode_head(_snake_case )
__lowerCAmelCase : str = nn.functional.interpolate(_snake_case , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=_snake_case )
__lowerCAmelCase : int = None
if self.auxiliary_head is not None:
__lowerCAmelCase : List[Any] = self.auxiliary_head(_snake_case )
__lowerCAmelCase : Tuple = nn.functional.interpolate(
_snake_case , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=_snake_case )
__lowerCAmelCase : Dict = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError("""The number of labels should be greater than one""" )
else:
# compute weighted loss
__lowerCAmelCase : Dict = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
__lowerCAmelCase : Any = loss_fct(_snake_case , _snake_case )
__lowerCAmelCase : Optional[Any] = loss_fct(_snake_case , _snake_case )
__lowerCAmelCase : Any = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
__lowerCAmelCase : List[Any] = (logits,) + outputs[1:]
else:
__lowerCAmelCase : Union[str, Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=_snake_case , logits=_snake_case , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 240
| 0
|
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case = """▁"""
__snake_case = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class _lowerCAmelCase ( snake_case_ , unittest.TestCase ):
__UpperCAmelCase : Optional[Any] = BertGenerationTokenizer
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Tuple = True
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
super().setUp()
snake_case : Dict = BertGenerationTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : Any = "<s>"
snake_case : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
snake_case : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(UpperCamelCase__ ) , 1002 )
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
snake_case : List[str] = BertGenerationTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
snake_case : Optional[Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(UpperCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [285, 46, 10, 170, 382] , )
snake_case : Tuple = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
snake_case : Dict = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
snake_case : Dict = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
snake_case : Any = "Hello World!"
snake_case : Any = [1_8536, 2260, 101]
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@slow
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
snake_case : Any = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
snake_case : List[str] = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@require_torch
@slow
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
snake_case : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
snake_case : Tuple = " ".join(UpperCamelCase__ )
snake_case : Optional[int] = self.big_tokenizer.encode_plus(UpperCamelCase__ , return_tensors="pt" , return_token_type_ids=UpperCamelCase__ )
snake_case : Dict = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=UpperCamelCase__ )
snake_case : List[Any] = BertGenerationConfig()
snake_case : Dict = BertGenerationEncoder(UpperCamelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCamelCase__ )
model(**UpperCamelCase__ )
@slow
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
snake_case : Optional[Any] = {"input_ids": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
| 178
|
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """simple docstring"""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(F'''{solution() = }''')
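# Illustrative check (assuming the reconstructed solution above):
#   solution(10) -> 17, since the primes below 10 are 2 + 3 + 5 + 7.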
| 178
| 1
|
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """simple docstring"""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
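# Illustrative check (assuming the reconstructed solution above):
#   solution(6) -> 13, the sixth prime (2, 3, 5, 7, 11, 13).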
| 714
|
'''simple docstring'''
def get_demo_graph(index):
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph):
    """simple docstring"""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
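# Illustrative check (assuming the reconstruction above): for get_demo_graph(0),
# compute_bridges finds the edges (2, 3), (3, 4) and (2, 5) in a DFS-dependent order;
# the triangle 0-1-2 and the cycle 5-6-7-8 contribute no bridges.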
if __name__ == "__main__":
import doctest
doctest.testmod()
| 640
| 0
|
'''simple docstring'''
encode_dict = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
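# Illustrative round trip (assuming the reconstructed encode/decode above):
#   encode("hello")         -> "AABBBAABAAABABAABABAABBAB"
#   decode(encode("hello")) -> "hello"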
if __name__ == "__main__":
from doctest import testmod
testmod()
| 508
|
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info


def get_job_time(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f'''Bearer {token}'''}
    url = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'''&page={i + 2}''', headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        return job_time
    except Exception:
        print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''')
    return {}
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
lowercase__ = parser.parse_args()
lowercase__ = get_job_time(args.workflow_run_id)
lowercase__ = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v['duration']}""")
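# Example invocation (illustrative; the flag is defined in the argparse setup above):
#   python <this_script>.py --workflow_run_id <run_id>
# The printed output lists each job with its duration in minutes, longest first.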
| 508
| 1
|
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
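# Illustrative note (assuming the reconstruction above): each iteration_step turns every
# segment into 4, so iterate(INITIAL_VECTORS, 5) grows the initial 3 segments (4 points)
# into 3 * 4**5 = 3072 segments, i.e. 3073 points to plot.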
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 704
|
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
__A : List[Any] = True
from torch.cuda.amp import autocast
__A : List[Any] = logging.getLogger(__name__)
@dataclass
class lowercase :
'''simple docstring'''
lowerCAmelCase__ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCAmelCase__ = field(
default=_lowerCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCAmelCase__ = field(
default=_lowerCamelCase , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
lowerCAmelCase__ = field(
default=_lowerCamelCase , metadata={"help": "Whether to log verbose messages or not."} , )
lowerCAmelCase__ = field(
default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
lowerCAmelCase__ = field(
default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
lowerCAmelCase__ = field(
default=0.999995 , metadata={"help": "Decay of gumbel temperature during training."} )
def lowerCamelCase_ ( lowercase__ , lowercase__):
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , )
lowerCamelCase__ = logging.WARNING
if model_args.verbose_logging:
lowerCamelCase__ = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank):
lowerCamelCase__ = logging.INFO
logger.setLevel(lowercase__)
@dataclass
class lowercase :
'''simple docstring'''
lowerCAmelCase__ = field(
default=_lowerCamelCase , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
lowerCAmelCase__ = field(
default=_lowerCamelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowerCAmelCase__ = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
lowerCAmelCase__ = field(
default="validation" , metadata={
"help": (
"The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
lowerCAmelCase__ = field(
default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
lowerCAmelCase__ = field(
default=_lowerCamelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
lowerCAmelCase__ = field(
default=1 , metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
} , )
lowerCAmelCase__ = field(
default=_lowerCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
lowerCAmelCase__ = field(
default=20.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class lowercase :
'''simple docstring'''
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = "longest"
lowerCAmelCase__ = None
lowerCAmelCase__ = None
def __call__( self : Optional[int] , __lowerCamelCase : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
lowerCamelCase__ = self.feature_extractor.pad(
__lowerCamelCase , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
lowerCamelCase__ = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] )
lowerCamelCase__ = batch["input_values"].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
lowerCamelCase__ = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to(
torch.long )
lowerCamelCase__ = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["input_values"].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
lowerCamelCase__ = 1
lowerCamelCase__ = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
lowerCamelCase__ = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=__lowerCamelCase , min_masks=2 , )
return batch
class lowercase ( _lowerCamelCase ):
'''simple docstring'''
def __init__( self : str , *__lowerCamelCase : Union[str, Any] , __lowerCamelCase : int=1 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : str=1.0 , **__lowerCamelCase : Optional[Any] ) -> str:
'''simple docstring'''
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
lowerCamelCase__ = 0
lowerCamelCase__ = max_gumbel_temp
lowerCamelCase__ = min_gumbel_temp
lowerCamelCase__ = gumbel_temp_decay
def a__ ( self : Tuple , __lowerCamelCase : nn.Module , __lowerCamelCase : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
'''simple docstring'''
model.train()
lowerCamelCase__ = self._prepare_inputs(__lowerCamelCase )
if self.use_amp:
with autocast():
lowerCamelCase__ = self.compute_loss(__lowerCamelCase , __lowerCamelCase )
else:
lowerCamelCase__ = self.compute_loss(__lowerCamelCase , __lowerCamelCase )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
lowerCamelCase__ = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
lowerCamelCase__ = loss.sum() / (inputs["mask_time_indices"]).sum()
else:
raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
lowerCamelCase__ = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__lowerCamelCase ).backward()
elif self.use_apex:
with amp.scale_loss(__lowerCamelCase , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__lowerCamelCase )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
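# Worked example (values assumed for illustration; they are not defined in this
# excerpt): with max_gumbel_temp = 2.0, gumbel_temp_decay = 0.999995 and
# min_gumbel_temp = 0.5, the temperature after 10_000 updates would be
#   max(2.0 * 0.999995 ** 10_000, 0.5) ~= 1.90
# i.e. the schedule decays geometrically and is clipped at the minimum.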
def lowerCamelCase_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_args_into_dataclasses()
configure_logger(lowercase__ , lowercase__)
# Downloading and loading a dataset from the hub.
lowerCamelCase__ = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir)
if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
lowerCamelCase__ = DatasetDict()
lowerCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , )
lowerCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , )
else:
        # make sure only "validation" and "train" keys remain
lowerCamelCase__ = DatasetDict()
lowerCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
lowerCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
lowerCamelCase__ = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=lowercase__)
    def prepare_dataset(batch):
        # load the audio and check that all files have the correct sampling rate
        batch["speech"], batch["sampling_rate"] = librosa.load(
            batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate
        )
        return batch
# load audio files into numpy arrays
lowerCamelCase__ = datasets.map(
lowercase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names)
# filter audio files that are too long
lowerCamelCase__ = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate))
    def normalize(batch):
return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate)
# normalize and transform to `BatchFeatures`
lowerCamelCase__ = vectorized_datasets.map(
lowercase__ , batched=lowercase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
lowerCamelCase__ = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
" ``config.feat_extract_norm='layer'")
lowerCamelCase__ = WavaVecaForPreTraining(lowercase__)
lowerCamelCase__ = DataCollatorForWavaVecaPretraining(model=lowercase__ , feature_extractor=lowercase__)
lowerCamelCase__ = WavaVecaPreTrainer(
model=lowercase__ , data_collator=lowercase__ , args=lowercase__ , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=lowercase__ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
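# Example invocation (hedged): the flag names are derived by HfArgumentParser from
# the dataclass fields referenced above; the script name, checkpoint and dataset
# identifiers below are placeholders, not taken from this file. The starting
# checkpoint must use do_stable_layer_norm=True, as enforced above.
#
#   python run_wav2vec2_pretraining.py \
#     --model_name_or_path <path-or-id-of-a-wav2vec2-layer-norm-checkpoint> \
#     --dataset_name librispeech_asr \
#     --dataset_config_name clean \
#     --max_duration_in_seconds 20.0 \
#     --preprocessing_num_workers 4 \
#     --output_dir ./wav2vec2-pretrained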
| 187
| 0
|
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
) -> None:
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
_lowerCAmelCase = parser.parse_args()
_lowerCAmelCase = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
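    # Example invocation (illustrative only; the script name, model identifiers and
    # output path are placeholders, the flags come from the argparse setup above):
    #
    #   python consolidate_rag_checkpoint.py \
    #     --model_type rag_sequence \
    #     --generator_name_or_path facebook/bart-large-cnn \
    #     --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
    #     --dest ./rag-consolidated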
| 264
|
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    """simple docstring"""
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """simple docstring"""
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
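# Cross-check (illustrative, independent of the backtracking above): the standard
# library produces the same orderings.
from itertools import permutations

for permutation in permutations([3, 1, 2, 4]):
    print(list(permutation))  # 4! = 24 orderings, matching generate_all_permutations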
| 315
| 0
|
"""simple docstring"""
from copy import deepcopy
class SCREAMING_SNAKE_CASE__ :
def __init__( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None ) -> None:
'''simple docstring'''
if arr is None and size is not None:
UpperCAmelCase : List[Any] = size
UpperCAmelCase : List[Any] = [0] * size
elif arr is not None:
self.init(_SCREAMING_SNAKE_CASE )
else:
raise ValueError("""Either arr or size must be specified""" )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
UpperCAmelCase : List[Any] = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = deepcopy(_SCREAMING_SNAKE_CASE )
for i in range(1 , self.size ):
UpperCAmelCase : Optional[Any] = self.next_(_SCREAMING_SNAKE_CASE )
if j < self.size:
self.tree[j] += self.tree[i]
def SCREAMING_SNAKE_CASE ( self ) -> list[int]:
'''simple docstring'''
UpperCAmelCase : Any = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
UpperCAmelCase : int = self.next_(_SCREAMING_SNAKE_CASE )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
return index + (index & (-index))
@staticmethod
def SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
return index - (index & (-index))
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
UpperCAmelCase : Dict = self.next_(_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
self.add(_SCREAMING_SNAKE_CASE , value - self.get(_SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
if right == 0:
return 0
UpperCAmelCase : List[str] = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
UpperCAmelCase : int = self.prev(_SCREAMING_SNAKE_CASE )
return result
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
return self.prefix(_SCREAMING_SNAKE_CASE ) - self.prefix(_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
return self.query(_SCREAMING_SNAKE_CASE , index + 1 )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
value -= self.tree[0]
if value < 0:
return -1
UpperCAmelCase : Union[str, Any] = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
UpperCAmelCase : int = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
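# Quick numeric check (illustrative only) of the low-bit arithmetic used by the
# two static helpers above: index & (-index) isolates the lowest set bit.
for index in (1, 2, 3, 6, 12):
    lowbit = index & (-index)
    print(index, "-> lowbit", lowbit, "next", index + lowbit, "prev", index - lowbit)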
| 359
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
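# Illustrative check (not part of the original solution): the first few primes
# produced by the incremental sieve above.
from itertools import islice

print(list(islice(sieve(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]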
| 359
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase: Optional[Any] ={
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase: List[Any] =["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase: Dict =[
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase: Optional[int] =[
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase: List[Any] =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 607
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
A : Optional[Any] = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , *__a , **__a ):
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , __a , )
super().__init__(*__a , **__a )
| 636
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class A__ ( A ):
"""simple docstring"""
_lowercase : Any = '''yolos'''
def __init__( self : int , A_ : Optional[Any]=7_6_8 , A_ : Any=1_2 , A_ : List[Any]=1_2 , A_ : Any=3_0_7_2 , A_ : Optional[int]="gelu" , A_ : List[str]=0.0 , A_ : Dict=0.0 , A_ : str=0.02 , A_ : Union[str, Any]=1E-12 , A_ : Optional[Any]=[5_1_2, 8_6_4] , A_ : List[Any]=1_6 , A_ : Dict=3 , A_ : int=True , A_ : Any=1_0_0 , A_ : Tuple=True , A_ : Any=False , A_ : str=1 , A_ : int=5 , A_ : Any=2 , A_ : Optional[int]=5 , A_ : str=2 , A_ : Dict=0.1 , **A_ : Optional[Any] , ):
'''simple docstring'''
super().__init__(**A_ )
_lowerCAmelCase : Optional[int] = hidden_size
_lowerCAmelCase : Optional[Any] = num_hidden_layers
_lowerCAmelCase : List[str] = num_attention_heads
_lowerCAmelCase : List[Any] = intermediate_size
_lowerCAmelCase : int = hidden_act
_lowerCAmelCase : Dict = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : str = layer_norm_eps
_lowerCAmelCase : Dict = image_size
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : Dict = num_channels
_lowerCAmelCase : Tuple = qkv_bias
_lowerCAmelCase : Tuple = num_detection_tokens
_lowerCAmelCase : Union[str, Any] = use_mid_position_embeddings
_lowerCAmelCase : Optional[int] = auxiliary_loss
# Hungarian matcher
_lowerCAmelCase : Optional[int] = class_cost
_lowerCAmelCase : Tuple = bbox_cost
_lowerCAmelCase : str = giou_cost
# Loss coefficients
_lowerCAmelCase : Tuple = bbox_loss_coefficient
_lowerCAmelCase : Dict = giou_loss_coefficient
_lowerCAmelCase : Optional[int] = eos_coefficient
class A__ ( A ):
"""simple docstring"""
_lowercase : Union[str, Any] = version.parse('''1.11''' )
@property
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
return 1E-4
@property
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
return 1_2
| 503
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 503
| 1
|
'''simple docstring'''
from collections import defaultdict
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Tuple , a__ : Optional[int] , a__ : Union[str, Any] ):
UpperCAmelCase = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
UpperCAmelCase = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(a__ ) )
]
UpperCAmelCase = defaultdict(a__ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
UpperCAmelCase = (1 << len(a__ )) - 1
def __snake_case ( self : List[Any] , a__ : Union[str, Any] , a__ : List[str] ):
# if mask == self.finalmask all persons are distributed tasks, return 1
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # Number of ways when we do not assign this task in the arrangement
UpperCAmelCase = self.count_ways_until(a__ , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
UpperCAmelCase = total_ways_util
return self.dp[mask][task_no]
def __snake_case ( self : int , a__ : Optional[Any] ):
# Store the list of persons for each task
for i in range(len(a__ ) ):
for j in task_performed[i]:
self.task[j].append(a__ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
a__ : List[Any] = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
a__ : List[Any] = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 51
|
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def lowerCamelCase( ) -> int:
A_ = ArgumentParser('Transformers CLI tool' ,usage='transformers-cli <command> [<args>]' )
A_ = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
DownloadCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
RunCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
ServeCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
UserCommands.register_subcommand(SCREAMING_SNAKE_CASE_ )
AddNewModelCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
AddNewModelLikeCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
LfsCommands.register_subcommand(SCREAMING_SNAKE_CASE_ )
PTtoTFCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
# Let's go
A_ = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE_ ,'func' ):
parser.print_help()
exit(1 )
# Run
A_ = args.func(SCREAMING_SNAKE_CASE_ )
service.run()
if __name__ == "__main__":
main()
| 366
| 0
|
from __future__ import annotations
import numpy as np
def A_ ( a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = np.shape(a )
if rows != columns:
SCREAMING_SNAKE_CASE_ : Optional[int] = (
'\'table\' has to be of square shaped array but got a '
f"{rows}x{columns} array:\n{table}"
)
raise ValueError(a )
SCREAMING_SNAKE_CASE_ : List[str] = np.zeros((rows, columns) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.zeros((rows, columns) )
for i in range(a ):
for j in range(a ):
SCREAMING_SNAKE_CASE_ : Tuple = sum(lower[i][k] * upper[k][j] for k in range(a ) )
if upper[j][j] == 0:
raise ArithmeticError('No LU decomposition exists' )
SCREAMING_SNAKE_CASE_ : List[Any] = (table[i][j] - total) / upper[j][j]
SCREAMING_SNAKE_CASE_ : Dict = 1
for j in range(a , a ):
SCREAMING_SNAKE_CASE_ : Any = sum(lower[i][k] * upper[k][j] for k in range(a ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 353
|
def different_signs(num1: int, num2: int) -> bool:
    """simple docstring"""
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
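# Quick checks (illustrative) of the sign-comparison trick above: the XOR of two
# integers is negative exactly when their signs differ.
print(different_signs(1, -1))   # True
print(different_signs(1, 1))    # False
print(different_signs(-1, -1))  # False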
| 353
| 1
|
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
UpperCAmelCase_ : Tuple = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def SCREAMING_SNAKE_CASE_ ( __A : Any ) -> Tuple:
"""simple docstring"""
a_ : List[str] = ['layers', 'blocks']
for k in ignore_keys:
state_dict.pop(__A , __A )
UpperCAmelCase_ : Tuple = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def SCREAMING_SNAKE_CASE_ ( __A : List[Any] ) -> Optional[int]:
"""simple docstring"""
a_ : Dict = list(s_dict.keys() )
for key in keys:
a_ : Union[str, Any] = key
for k, v in WHISPER_MAPPING.items():
if k in key:
a_ : List[str] = new_key.replace(__A , __A )
print(F"""{key} -> {new_key}""" )
a_ : Any = s_dict.pop(__A )
return s_dict
def SCREAMING_SNAKE_CASE_ ( __A : List[str] ) -> Union[str, Any]:
"""simple docstring"""
a_ , a_ : List[Any] = emb.weight.shape
a_ : int = nn.Linear(__A , __A , bias=__A )
a_ : Union[str, Any] = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE_ ( __A : List[Any] , __A : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
os.makedirs(__A , exist_ok=__A )
a_ : Optional[int] = os.path.basename(__A )
a_ : List[str] = url.split('/' )[-2]
a_ : Optional[int] = os.path.join(__A , __A )
if os.path.exists(__A ) and not os.path.isfile(__A ):
raise RuntimeError(F"""{download_target} exists and is not a regular file""" )
if os.path.isfile(__A ):
a_ : Optional[Any] = open(__A , 'rb' ).read()
if hashlib.shaaaa(__A ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
with urllib.request.urlopen(__A ) as source, open(__A , 'wb' ) as output:
with tqdm(
total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=__A , unit_divisor=10_24 ) as loop:
while True:
a_ : Union[str, Any] = source.read(81_92 )
if not buffer:
break
output.write(__A )
loop.update(len(__A ) )
a_ : str = open(__A , 'rb' ).read()
if hashlib.shaaaa(__A ).hexdigest() != expected_shaaaa:
raise RuntimeError(
'Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.' )
return model_bytes
def SCREAMING_SNAKE_CASE_ ( __A : List[Any] , __A : Tuple ) -> List[str]:
"""simple docstring"""
if ".pt" not in checkpoint_path:
a_ : int = _download(_MODELS[checkpoint_path] )
else:
a_ : Optional[Any] = torch.load(__A , map_location='cpu' )
a_ : Optional[int] = original_checkpoint['dims']
a_ : str = original_checkpoint['model_state_dict']
a_ : Union[str, Any] = state_dict['decoder.token_embedding.weight']
remove_ignore_keys_(__A )
rename_keys(__A )
a_ : int = True
a_ : Tuple = state_dict['decoder.layers.0.fc1.weight'].shape[0]
a_ : List[str] = WhisperConfig(
vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=__A , decoder_ffn_dim=__A , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_state'] , max_source_positions=dimensions['n_audio_ctx'] , )
a_ : Dict = WhisperForConditionalGeneration(__A )
a_ , a_ : Union[str, Any] = model.model.load_state_dict(__A , strict=__A )
if len(__A ) > 0 and not set(__A ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
F""" but all the following weights are missing {missing}""" )
if tie_embeds:
a_ : str = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
a_ : Optional[int] = proj_out_weights
model.save_pretrained(__A )
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Patht to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
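# Example invocation (illustrative; the script name and output path are
# placeholders, while 'tiny' is one of the checkpoint keys defined in _MODELS above):
#
#   python convert_openai_whisper_to_hf.py \
#     --checkpoint_path tiny \
#     --pytorch_dump_folder_path ./whisper-tiny-hf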
| 570
|
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowercase_ = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
lowercase_ = 10
lowercase_ = 256
def lowerCAmelCase (__A):
"""simple docstring"""
if len(__A) < MIN_NUM_TOKENS:
return None
_a = MinHash(num_perm=__A)
for token in set(__A):
min_hash.update(token.encode())
return min_hash
def lowerCAmelCase (__A):
"""simple docstring"""
return {t for t in NON_ALPHA.split(__A) if len(t.strip()) > 0}
class __A :
'''simple docstring'''
def __init__(self , *,
A = 0.85 , ) -> Optional[int]:
"""simple docstring"""
_a = duplication_jaccard_threshold
_a = NUM_PERM
_a = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
_a = defaultdict(A )
def a__ (self , A , A ) -> None:
"""simple docstring"""
_a = self._index.query(A )
if code_key in self._index.keys:
print(f'''Duplicate key {code_key}''' )
return
self._index.insert(A , A )
if len(A ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(A )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(A )
def a__ (self ) -> List[List[Dict]]:
"""simple docstring"""
_a = []
for base, duplicates in self._duplicate_clusters.items():
_a = [base] + list(A )
# reformat the cluster to be a list of dict
_a = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(A )
return duplicate_clusters
def a__ (self , A ) -> None:
"""simple docstring"""
_a = self.get_duplicate_clusters()
with open(A , '''w''' ) as f:
json.dump(A , A )
def lowerCAmelCase (__A):
"""simple docstring"""
_a , _a = element
_a = get_min_hash([t for t in NON_ALPHA.split(data['''content''']) if len(t.strip()) > 0])
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def lowerCAmelCase (__A):
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(__A , max_queue_size=10_000) , chunksize=100 , ):
if data is not None:
yield data
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = DuplicationIndex(duplication_jaccard_threshold=__A)
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(__A)) , max_queue_size=100)):
di.add(__A , __A)
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = get_tokens(__A)
_a = get_tokens(__A)
return len(tokensa & tokensa) / len(tokensa | tokensa)
lowercase_ = None
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = []
for elementa in cluster:
_a = _shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
_a = _shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(__A , __A) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_a = 1
extremes.append(__A)
return extremes
def lowerCAmelCase (__A , __A , __A):
"""simple docstring"""
global _shared_dataset
_a = dataset
_a = []
_a = partial(_find_cluster_extremes_shared , jaccard_threshold=__A)
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
__A , __A , ) , total=len(__A) , ):
extremes_list.append(__A)
return extremes_list
def lowerCAmelCase (__A , __A = 0.85):
"""simple docstring"""
_a = make_duplicate_clusters(__A , __A)
_a = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
_a = {}
_a = find_extremes(__A , __A , __A)
for extremes in extremes_clusters:
for element in extremes:
_a = element
_a = duplicate_indices - set(extreme_dict.keys())
_a = dataset.filter(lambda __A , __A: idx not in remove_indices , with_indices=__A)
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
_a = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
_a = extreme_dict[element['''base_index''']]['''copies''']
print(F'''Original dataset size: {len(__A)}''')
print(F'''Number of duplicate clusters: {len(__A)}''')
print(F'''Files in duplicate cluster: {len(__A)}''')
print(F'''Unique files in duplicate cluster: {len(__A)}''')
print(F'''Filtered dataset size: {len(__A)}''')
return ds_filter, duplicate_clusters
| 11
| 0
|
def _a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
if number < 0 or shift_amount < 0:
raise ValueError("both inputs must be positive integers" )
__lowerCAmelCase = str(bin(__A ) )
binary_number += "0" * shift_amount
return binary_number
def _a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
if number < 0 or shift_amount < 0:
raise ValueError("both inputs must be positive integers" )
__lowerCAmelCase = str(bin(__A ) )[2:]
if shift_amount >= len(__A ):
return "0b0"
__lowerCAmelCase = binary_number[: len(__A ) - shift_amount]
return "0b" + shifted_binary_number
def _a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
if number >= 0: # Get binary representation of positive number
__lowerCAmelCase = '''0''' + str(bin(__A ) ).strip("-" )[2:]
else: # Get binary (2's complement) representation of negative number
__lowerCAmelCase = len(bin(__A )[3:] ) # Find 2's complement of number
__lowerCAmelCase = bin(abs(__A ) - (1 << binary_number_length) )[3:]
__lowerCAmelCase = (
'''1''' + '''0''' * (binary_number_length - len(__A )) + binary_number
)
if shift_amount >= len(__A ):
return "0b" + binary_number[0] * len(__A )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(__A ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(snake_case__ )
class a__ ( snake_case__ ):
def __init__( self , *_A , **_A ):
"""simple docstring"""
super().__init__(*_A , **_A )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def __SCREAMING_SNAKE_CASE( self , _A=None ):
"""simple docstring"""
__lowerCAmelCase = {}
if top_k is not None:
__lowerCAmelCase = top_k
return {}, {}, postprocess_params
def __call__( self , _A , **_A ):
"""simple docstring"""
return super().__call__(_A , **_A )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = load_image(_A )
__lowerCAmelCase = self.image_processor(images=_A , return_tensors=self.framework )
return model_inputs
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = self.model(**_A )
return model_outputs
def __SCREAMING_SNAKE_CASE( self , _A , _A=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
__lowerCAmelCase = self.model.config.num_labels
if self.framework == "pt":
__lowerCAmelCase = model_outputs.logits.softmax(-1 )[0]
__lowerCAmelCase , __lowerCAmelCase = probs.topk(_A )
elif self.framework == "tf":
__lowerCAmelCase = stable_softmax(model_outputs.logits , axis=-1 )[0]
__lowerCAmelCase = tf.math.top_k(_A , k=_A )
__lowerCAmelCase , __lowerCAmelCase = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
__lowerCAmelCase = scores.tolist()
__lowerCAmelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_A , _A )]
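# Hedged usage sketch (not part of this module): in practice the class above is
# reached through the pipeline factory, e.g.
#
#   from transformers import pipeline
#   classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
#   classifier("path/to/image.png", top_k=3)
#
# where the checkpoint and the image path are placeholders.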
| 552
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__a : List[str] = logging.getLogger(__name__)
@dataclass
class __lowercase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE = field(
default="NER" , metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE = field(default=lowercase_ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __lowercase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} , )
SCREAMING_SNAKE_CASE = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _SCREAMING_SNAKE_CASE ( ) -> Tuple:
"""simple docstring"""
__A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__A , __A , __A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__A , __A , __A = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
__A = import_module("""tasks""" )
try:
__A = getattr(__lowercase , model_args.task_type )
__A = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , __lowercase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
__A = token_classification_task.get_labels(data_args.labels )
__A = dict(enumerate(__lowercase ) )
__A = len(__lowercase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowercase , idalabel=__lowercase , labelaid={label: i for i, label in enumerate(__lowercase )} , cache_dir=model_args.cache_dir , )
__A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
__A = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , )
# Get datasets
__A = (
TokenClassificationDataset(
token_classification_task=__lowercase , data_dir=data_args.data_dir , tokenizer=__lowercase , labels=__lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__A = (
TokenClassificationDataset(
token_classification_task=__lowercase , data_dir=data_args.data_dir , tokenizer=__lowercase , labels=__lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(__lowercase : np.ndarray , __lowercase : np.ndarray ) -> Tuple[List[int], List[int]]:
__A = np.argmax(__lowercase , axis=2 )
__A , __A = preds.shape
__A = [[] for _ in range(__lowercase )]
__A = [[] for _ in range(__lowercase )]
for i in range(__lowercase ):
for j in range(__lowercase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(__lowercase : EvalPrediction ) -> Dict:
__A , __A = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(__lowercase , __lowercase ),
"precision": precision_score(__lowercase , __lowercase ),
"recall": recall_score(__lowercase , __lowercase ),
"f1": fa_score(__lowercase , __lowercase ),
}
# Data collator
__A = DataCollatorWithPadding(__lowercase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__A = Trainer(
model=__lowercase , args=__lowercase , train_dataset=__lowercase , eval_dataset=__lowercase , compute_metrics=__lowercase , data_collator=__lowercase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__A = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__A = trainer.evaluate()
__A = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_process_zero():
with open(__lowercase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , __lowercase , __lowercase )
writer.write("""%s = %s\n""" % (key, value) )
results.update(__lowercase )
# Predict
if training_args.do_predict:
__A = TokenClassificationDataset(
token_classification_task=__lowercase , data_dir=data_args.data_dir , tokenizer=__lowercase , labels=__lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
__A , __A , __A = trainer.predict(__lowercase )
__A , __A = align_predictions(__lowercase , __lowercase )
__A = os.path.join(training_args.output_dir , """test_results.txt""" )
if trainer.is_world_process_zero():
with open(__lowercase , """w""" ) as writer:
for key, value in metrics.items():
logger.info(""" %s = %s""" , __lowercase , __lowercase )
writer.write("""%s = %s\n""" % (key, value) )
# Save predictions
__A = os.path.join(training_args.output_dir , """test_predictions.txt""" )
if trainer.is_world_process_zero():
with open(__lowercase , """w""" ) as writer:
with open(os.path.join(data_args.data_dir , """test.txt""" ) , """r""" ) as f:
token_classification_task.write_predictions_to_file(__lowercase , __lowercase , __lowercase )
return results
def _SCREAMING_SNAKE_CASE ( __lowercase : Any ) -> Optional[int]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
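# Example invocation (hedged): flag names follow the dataclass attributes referenced
# in main() above (model_name_or_path, data_dir, labels, ...); the paths and the
# model identifier below are placeholders.
#
#   python run_ner.py \
#     --task_type NER \
#     --model_name_or_path bert-base-cased \
#     --data_dir ./conll2003 \
#     --labels ./conll2003/labels.txt \
#     --output_dir ./ner-model \
#     --max_seq_length 128 \
#     --do_train --do_eval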
| 637
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class __lowercase ( lowercase_ ):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : int=7 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : int=True , UpperCamelCase_ : str=False , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Union[str, Any]=99 , UpperCamelCase_ : Any=32 , UpperCamelCase_ : Union[str, Any]=5 , UpperCamelCase_ : List[str]=4 , UpperCamelCase_ : Union[str, Any]=64 , UpperCamelCase_ : Any="gelu" , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : int=512 , UpperCamelCase_ : Dict=16 , UpperCamelCase_ : List[str]=2 , UpperCamelCase_ : int=0.02 , UpperCamelCase_ : Dict=3 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : int=None , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : int=2 , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : str=4 , UpperCamelCase_ : List[str]=1 , ):
"""simple docstring"""
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_input_mask
__A = use_token_type_ids
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = type_vocab_size
__A = type_sequence_label_size
__A = initializer_range
__A = num_labels
__A = num_choices
__A = scope
__A = q_groups
__A = k_groups
__A = v_groups
__A = post_attention_groups
__A = intermediate_groups
__A = output_groups
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
__A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A = None
if self.use_input_mask:
__A = random_attention_mask([self.batch_size, self.seq_length] )
__A = None
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A = ids_tensor([self.batch_size] , self.num_choices )
__A = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def lowerCAmelCase_ ( self : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Dict ):
"""simple docstring"""
__A = SqueezeBertModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__A = model(UpperCamelCase_ , UpperCamelCase_ )
__A = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int] ):
"""simple docstring"""
__A = SqueezeBertForMaskedLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__A = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] ):
"""simple docstring"""
__A = SqueezeBertForQuestionAnswering(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__A = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any ):
"""simple docstring"""
__A = self.num_labels
__A = SqueezeBertForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__A = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : int ):
"""simple docstring"""
__A = self.num_labels
__A = SqueezeBertForTokenClassification(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__A = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict ):
"""simple docstring"""
__A = self.num_choices
__A = SqueezeBertForMultipleChoice(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
__A = self.prepare_config_and_inputs()
((__A) , (__A) , (__A) , (__A) , (__A) , (__A)) = config_and_inputs
__A = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
SCREAMING_SNAKE_CASE = (
{
"feature-extraction": SqueezeBertModel,
"fill-mask": SqueezeBertForMaskedLM,
"question-answering": SqueezeBertForQuestionAnswering,
"text-classification": SqueezeBertForSequenceClassification,
"token-classification": SqueezeBertForTokenClassification,
"zero-shot": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
__A = SqueezeBertModelTester(self )
__A = ConfigTester(self , config_class=UpperCamelCase_ , dim=37 )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*UpperCamelCase_ )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*UpperCamelCase_ )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*UpperCamelCase_ )
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*UpperCamelCase_ )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*UpperCamelCase_ )
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*UpperCamelCase_ )
@slow
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = SqueezeBertModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
__A = SqueezeBertForSequenceClassification.from_pretrained("""squeezebert/squeezebert-mnli""" )
__A = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]] )
__A = model(UpperCamelCase_ )[0]
__A = torch.Size((1, 3) )
self.assertEqual(output.shape , UpperCamelCase_ )
__A = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-4 ) )
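# Note (added for clarity): squeezebert/squeezebert-mnli is a 3-way MNLI classification head,
# which is why the expected logits have shape (1, 3) and are compared with atol=1e-4 above.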
| 637
| 1
|
import itertools
import math
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
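# Illustrative sanity checks for the 6k +/- 1 primality test above (added example, not part of
# the original solution):
#   is_prime(29) -> True   (29 = 6*5 - 1; the loop finds no divisor among 5 and 7)
#   is_prime(25) -> False  (caught at i = 5 in the first loop iteration)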
def _SCREAMING_SNAKE_CASE ( ) -> Dict:
__lowerCAmelCase : Optional[Any] = 2
while True:
if is_prime(SCREAMING_SNAKE_CASE ):
yield num
num += 1
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :int = 10_001 ) -> int:
return next(itertools.islice(prime_generator() , nth - 1 , SCREAMING_SNAKE_CASE ) )
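# Added note: islice(prime_generator(), nth - 1, nth) skips the first nth - 1 primes, so the
# single element consumed by next() is the nth prime (the 10_001st for the default input).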
if __name__ == "__main__":
print(f'''{solution() = }''')
| 240
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
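# Layout note (added): this mirrors the standard lazy-import pattern. Under TYPE_CHECKING the
# real symbols are imported eagerly for static analysis; at runtime the module is swapped for a
# _LazyModule that resolves names on first attribute access, so torch is only required once a
# WavLM class is actually used.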
| 240
| 1
|
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__A : Any = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __snake_case ( datasets.BuilderConfig):
"""simple docstring"""
lowercase = None
def UpperCamelCase_ ( A__ : "pyspark.sql.DataFrame" , A__ : List[int] , ):
'''simple docstring'''
import pyspark
def generate_fn():
lowerCAmelCase_ : int = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
lowerCAmelCase_ : Optional[int] = df_with_partition_id.select("""*""" ).where(f'part_id = {partition_id}' ).drop("""part_id""" )
lowerCAmelCase_ : int = partition_df.collect()
lowerCAmelCase_ : Dict = 0
for row in rows:
yield f'{partition_id}_{row_id}', row.asDict()
row_id += 1
return generate_fn
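# Note (added): the factory above closes over (df, partition_order) and yields examples keyed as
# "<partition_id>_<row_id>". Collecting one partition at a time keeps driver memory bounded to a
# single Spark partition instead of the whole DataFrame.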
class __snake_case ( _BaseExamplesIterable):
"""simple docstring"""
def __init__( self : Tuple , lowerCamelCase : Any , lowerCamelCase : Union[str, Any]=None , ) -> Any:
lowerCAmelCase_ : Any = df
lowerCAmelCase_ : List[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
lowerCAmelCase_ : Tuple = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : List[Any] ) -> Dict:
yield from self.generate_examples_fn()
def __lowercase ( self : str , lowerCamelCase : Dict ) -> int:
lowerCAmelCase_ : Optional[int] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowerCamelCase )
return SparkExamplesIterable(self.df , partition_order=lowerCamelCase )
def __lowercase ( self : Optional[int] , lowerCamelCase : Tuple , lowerCamelCase : List[Any] ) -> Tuple:
lowerCAmelCase_ : Union[str, Any] = self.split_shard_indices_by_worker(lowerCamelCase , lowerCamelCase )
return SparkExamplesIterable(self.df , partition_order=lowerCamelCase )
@property
def __lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
return len(self.partition_order )
class __snake_case ( datasets.DatasetBuilder):
"""simple docstring"""
lowercase = SparkConfig
def __init__( self : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] = None , lowerCamelCase : Tuple = None , **lowerCamelCase : List[Any] , ) -> str:
import pyspark
lowerCAmelCase_ : Dict = pyspark.sql.SparkSession.builder.getOrCreate()
lowerCAmelCase_ : Optional[Any] = df
lowerCAmelCase_ : List[str] = working_dir
super().__init__(
cache_dir=lowerCamelCase , config_name=str(self.df.semanticHash() ) , **lowerCamelCase , )
def __lowercase ( self : Optional[Any] ) -> Optional[int]:
# Returns the path of the created file.
def create_cache_and_write_probe(lowerCamelCase : Any ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=lowerCamelCase )
            lowerCAmelCase_ : Union[str, Any] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowerCamelCase , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowerCAmelCase_ : Dict = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowerCamelCase ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def __lowercase ( self : Any ) -> Any:
return datasets.DatasetInfo(features=self.config.features )
def __lowercase ( self : Tuple , lowerCamelCase : Optional[int] ) -> Tuple:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def __lowercase ( self : Optional[int] , lowerCamelCase : Optional[int] ) -> Any:
import pyspark
def get_arrow_batch_size(lowerCamelCase : Any ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
lowerCAmelCase_ : List[str] = self.df.count()
lowerCAmelCase_ : int = df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowerCAmelCase_ : Optional[int] = (
self.df.limit(lowerCamelCase )
.repartition(1 )
.mapInArrow(lowerCamelCase , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowerCAmelCase_ : List[str] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowerCAmelCase_ : int = min(lowerCamelCase , int(approx_total_size / max_shard_size ) )
lowerCAmelCase_ : str = self.df.repartition(lowerCamelCase )
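    # In short (added note): the method above samples at most 100 rows to estimate bytes per row,
    # extrapolates an approximate total Arrow size, and, if that exceeds max_shard_size,
    # repartitions the DataFrame into roughly one partition per target shard while keeping at
    # least one row per partition.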
def __lowercase ( self : Any , lowerCamelCase : str , lowerCamelCase : int , lowerCamelCase : Dict , ) -> Any:
import pyspark
lowerCAmelCase_ : Optional[Any] = ParquetWriter if file_format == """parquet""" else ArrowWriter
lowerCAmelCase_ : Optional[Any] = os.path.join(self._working_dir , os.path.basename(lowerCamelCase ) ) if self._working_dir else fpath
lowerCAmelCase_ : Any = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowerCAmelCase_ : Any = self.config.features
lowerCAmelCase_ : str = self._writer_batch_size
lowerCAmelCase_ : List[Any] = self._fs.storage_options
def write_arrow(lowerCamelCase : Dict ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowerCAmelCase_ : int = pyspark.TaskContext().taskAttemptId()
lowerCAmelCase_ : Union[str, Any] = next(lowerCamelCase , lowerCamelCase )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
lowerCAmelCase_ : str = 0
lowerCAmelCase_ : Optional[int] = writer_class(
features=lowerCamelCase , path=working_fpath.replace("""SSSSS""" , F'{shard_id:05d}' ).replace("""TTTTT""" , F'{task_id:05d}' ) , writer_batch_size=lowerCamelCase , storage_options=lowerCamelCase , embed_local_files=lowerCamelCase , )
lowerCAmelCase_ : Tuple = pa.Table.from_batches([first_batch] )
writer.write_table(lowerCamelCase )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowerCAmelCase_, lowerCAmelCase_ : Optional[int] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
lowerCAmelCase_ : List[Any] = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , F'{shard_id:05d}' ).replace("""TTTTT""" , F'{task_id:05d}' ) , writer_batch_size=lowerCamelCase , storage_options=lowerCamelCase , embed_local_files=lowerCamelCase , )
lowerCAmelCase_ : Optional[int] = pa.Table.from_batches([batch] )
writer.write_table(lowerCamelCase )
if writer._num_bytes > 0:
lowerCAmelCase_, lowerCAmelCase_ : Optional[Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowerCamelCase ) ):
lowerCAmelCase_ : Tuple = os.path.join(os.path.dirname(lowerCamelCase ) , os.path.basename(lowerCamelCase ) )
shutil.move(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : Optional[Any] = (
self.df.mapInArrow(lowerCamelCase , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def __lowercase ( self : str , lowerCamelCase : int , lowerCamelCase : Optional[Any] = "arrow" , lowerCamelCase : List[Any] = None , lowerCamelCase : Dict = None , **lowerCamelCase : List[Any] , ) -> Any:
self._validate_cache_dir()
lowerCAmelCase_ : List[Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = not is_remote_filesystem(self._fs )
lowerCAmelCase_ : List[Any] = os.path.join if is_local else posixpath.join
lowerCAmelCase_ : Any = """-TTTTT-SSSSS-of-NNNNN"""
lowerCAmelCase_ : Optional[int] = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
lowerCAmelCase_ : Union[str, Any] = path_join(self._output_dir , lowerCamelCase )
lowerCAmelCase_ : Any = 0
lowerCAmelCase_ : str = 0
lowerCAmelCase_ : Optional[int] = 0
lowerCAmelCase_ : str = []
lowerCAmelCase_ : Optional[int] = []
for task_id, content in self._prepare_split_single(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
            lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowerCamelCase )
lowerCAmelCase_ : int = total_num_examples
lowerCAmelCase_ : str = total_num_bytes
# should rename everything at the end
logger.debug(F'Renaming {total_shards} shards.' )
if total_shards > 1:
lowerCAmelCase_ : int = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowerCAmelCase_ : Optional[int] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowerCamelCase : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Tuple , ):
rename(
lowerCamelCase , fpath.replace("""SSSSS""" , F'{shard_id:05d}' ).replace("""TTTTT""" , F'{task_id:05d}' ) , fpath.replace("""TTTTT-SSSSS""" , F'{global_shard_id:05d}' ).replace("""NNNNN""" , F'{total_shards:05d}' ) , )
lowerCAmelCase_ : int = []
lowerCAmelCase_ : List[str] = 0
for i in range(len(lowerCamelCase ) ):
lowerCAmelCase_, lowerCAmelCase_ : Tuple = task_id_and_num_shards[i]
for shard_id in range(lowerCamelCase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowerCamelCase , len(lowerCamelCase ) ).map(lambda lowerCamelCase : _rename_shard(*lowerCamelCase ) ).collect()
else:
# don't use any pattern
lowerCAmelCase_ : Any = 0
lowerCAmelCase_ : int = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , F'{shard_id:05d}' ).replace("""TTTTT""" , F'{task_id:05d}' ) , fpath.replace(lowerCamelCase , """""" ) , )
def __lowercase ( self : List[Any] , lowerCamelCase : Any , ) -> str:
return SparkExamplesIterable(self.df )
| 275
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_a : List[str] = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
_a : str = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
_a : Tuple = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
_a : Dict = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
_a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 479
| 0
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __magic_name__ ( unittest.TestCase ):
UpperCAmelCase = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def _UpperCamelCase ( self : Tuple , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any] ) -> Optional[Any]:
UpperCAmelCase = hf_hub_download(
repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
UpperCAmelCase = VideoClassificationPipeline(model=lowerCamelCase__ , image_processor=lowerCamelCase__ , top_k=2 )
UpperCAmelCase = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def _UpperCamelCase ( self : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple ) -> List[Any]:
for example in examples:
UpperCAmelCase = video_classifier(lowerCamelCase__ )
self.assertEqual(
lowerCamelCase__ , [
{"score": ANY(lowerCamelCase__ ), "label": ANY(lowerCamelCase__ )},
{"score": ANY(lowerCamelCase__ ), "label": ANY(lowerCamelCase__ )},
] , )
@require_torch
def _UpperCamelCase ( self : Any ) -> List[Any]:
UpperCAmelCase = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
UpperCAmelCase = VideoMAEFeatureExtractor(
size={"shortest_edge": 1_0} , crop_size={"height": 1_0, "width": 1_0} )
UpperCAmelCase = pipeline(
"video-classification" , model=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , frame_sampling_rate=4 )
UpperCAmelCase = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
UpperCAmelCase = video_classifier(lowerCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4 ) , [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}] , )
UpperCAmelCase = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4 ) , [
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
] , )
@require_tf
def _UpperCamelCase ( self : List[str] ) -> Optional[int]:
pass
| 717
|
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
lowerCAmelCase__ = "src/diffusers"
# Matches is_xxx_available()
lowerCAmelCase__ = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
lowerCAmelCase__ = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
lowerCAmelCase__ = "\n{0} = None\n"
lowerCAmelCase__ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
lowerCAmelCase__ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def _lowerCAmelCase( __A ):
UpperCAmelCase = _re_backend.findall(__A )
if len(__A ) == 0:
return None
return "_and_".join(__A )
def _lowerCAmelCase( ):
with open(os.path.join(__A , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase = f.readlines()
# Get to the point we do the actual imports for type checking
UpperCAmelCase = 0
UpperCAmelCase = {}
    # Go through to the end of the file
while line_index < len(__A ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
UpperCAmelCase = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("else:" ):
line_index += 1
line_index += 1
UpperCAmelCase = []
# Until we unindent, add backend objects to the list
while line_index < len(__A ) and len(lines[line_index] ) > 1:
UpperCAmelCase = lines[line_index]
UpperCAmelCase = _re_single_line_import.search(__A )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__A ) > 0:
UpperCAmelCase = objects
else:
line_index += 1
return backend_specific_objects
def _lowerCAmelCase( __A , __A ):
if name.isupper():
return DUMMY_CONSTANT.format(__A )
elif name.islower():
return DUMMY_FUNCTION.format(__A , __A )
else:
return DUMMY_CLASS.format(__A , __A )
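# Added note: create_dummy_object picks a template by name casing: ALL_CAPS names become dummy
# constants, lowercase names become dummy functions, and CamelCase names become dummy classes
# whose methods call requires_backends with the missing backend list.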
def _lowerCAmelCase( __A=None ):
if backend_specific_objects is None:
UpperCAmelCase = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
UpperCAmelCase = {}
for backend, objects in backend_specific_objects.items():
UpperCAmelCase = "[" + ", ".join(F"\"{b}\"" for b in backend.split("_and_" ) ) + "]"
UpperCAmelCase = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__A , __A ) for o in objects] )
UpperCAmelCase = dummy_file
return dummy_files
def _lowerCAmelCase( __A=False ):
UpperCAmelCase = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
UpperCAmelCase = {"torch": "pt"}
# Locate actual dummy modules and read their content.
UpperCAmelCase = os.path.join(__A , "utils" )
UpperCAmelCase = {
backend: os.path.join(__A , F"dummy_{short_names.get(__A , __A )}_objects.py" )
for backend in dummy_files.keys()
}
UpperCAmelCase = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__A ):
with open(__A , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase = f.read()
else:
UpperCAmelCase = ""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F"Updating diffusers.utils.dummy_{short_names.get(__A , __A )}_objects.py as the main "
"__init__ has new objects." )
with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"The main __init__ has objects that are not present in "
F"diffusers.utils.dummy_{short_names.get(__A , __A )}_objects.py. Run `make fix-copies` "
"to fix this." )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCAmelCase__ = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 1
| 0
|
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __A (unittest.TestCase ):
def _snake_case ( self ):
__UpperCAmelCase : List[str] = 0
@slow
def _snake_case ( self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
            self.assertIsInstance(UpperCamelCase_ , (GPT2Tokenizer, GPT2TokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
def _snake_case ( self ):
__UpperCAmelCase : Any = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _snake_case ( self ):
__UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def _snake_case ( self ):
__UpperCAmelCase : Any = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
# Check that tokenizer_type ≠ model_type
__UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _snake_case ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(UpperCamelCase_ , "vocab.txt" ) )
__UpperCAmelCase : Any = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type="bert" , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(UpperCamelCase_ , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(UpperCamelCase_ , "merges.txt" ) )
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type="gpt2" , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@require_tokenizers
def _snake_case ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(UpperCamelCase_ , "vocab.txt" ) )
__UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type="bert" )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(UpperCamelCase_ , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(UpperCamelCase_ , "merges.txt" ) )
__UpperCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type="gpt2" )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self ):
with pytest.raises(UpperCamelCase_ ):
AutoTokenizer.from_pretrained("./" , tokenizer_type="xxx" )
@require_tokenizers
def _snake_case ( self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__UpperCAmelCase : Optional[Any] = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased" )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCamelCase_ )
else:
self.assertEqual(tokenizer.do_lower_case , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_12 )
@require_tokenizers
def _snake_case ( self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
UpperCamelCase_ , "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" , ):
__UpperCAmelCase : List[str] = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists" )
def _snake_case ( self ):
__UpperCAmelCase : str = TOKENIZER_MAPPING.values()
__UpperCAmelCase : Tuple = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(UpperCamelCase_ )
@require_tokenizers
def _snake_case ( self ):
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=UpperCamelCase_ ) , UpperCamelCase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ) , UpperCamelCase_ )
@require_tokenizers
def _snake_case ( self ):
__UpperCAmelCase : int = AutoTokenizer.from_pretrained("distilbert-base-uncased" , do_lower_case=UpperCamelCase_ )
__UpperCAmelCase : int = 'Hello, world. How are you?'
__UpperCAmelCase : List[str] = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual("[UNK]" , tokens[0] )
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("microsoft/mpnet-base" , do_lower_case=UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual("[UNK]" , tokens[0] )
@require_tokenizers
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config" )
self.assertEqual(type(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_12 )
self.assertEqual(tokenizer.vocab_size , 3_00_00 )
self.assertEqual(tokenizer.unk_token , "[UNK]" )
self.assertEqual(tokenizer.padding_side , "right" )
self.assertEqual(tokenizer.truncation_side , "right" )
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
__UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("ctrl" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase : int = get_tokenizer_config("bert-base-cased" )
__UpperCAmelCase : List[Any] = config.pop("_commit_hash" , UpperCamelCase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(UpperCamelCase_ , {"do_lower_case": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
__UpperCAmelCase : str = get_tokenizer_config(UpperCamelCase_ )
self.assertDictEqual(UpperCamelCase_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
__UpperCAmelCase : List[str] = get_tokenizer_config(UpperCamelCase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["tokenizer_class"] , "BertTokenizer" )
def _snake_case ( self ):
try:
AutoConfig.register("custom" , UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
__UpperCAmelCase : Any = CustomTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _snake_case ( self ):
try:
AutoConfig.register("custom" , UpperCamelCase_ )
# Can register in two steps
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
            # We pass through a BertTokenizerFast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase : Optional[int] = BertTokenizerFast.from_pretrained(UpperCamelCase_ )
bert_tokenizer.save_pretrained(UpperCamelCase_ )
__UpperCAmelCase : Tuple = CustomTokenizerFast.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
__UpperCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _snake_case ( self ):
with self.assertRaises(UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase_ )
__UpperCAmelCase : int = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
__UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
__UpperCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
@require_tokenizers
def _snake_case ( self ):
class __A (snake_case_ ):
snake_case :Dict = False
class __A (snake_case_ ):
snake_case :List[str] = NewTokenizer
snake_case :List[str] = False
try:
AutoConfig.register("custom" , UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
# If remote code is not set, the default is to use local
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
__UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
__UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertTrue(tokenizer.special_attribute_present )
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _snake_case ( self ):
__UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
__UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def _snake_case ( self ):
with self.assertRaisesRegex(
UpperCamelCase_ , "bert-base is not a local folder and is not a valid model identifier" ):
__UpperCAmelCase : str = AutoTokenizer.from_pretrained("bert-base" )
def _snake_case ( self ):
with self.assertRaisesRegex(
UpperCamelCase_ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
__UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , revision="aaaaaa" )
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
__UpperCAmelCase : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 168
|
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a_ = logging.get_logger(__name__)
class _lowercase ( BaseImageProcessor ):
lowercase = ['pixel_values']
def __init__( self : Optional[int] , snake_case : bool = True , snake_case : int = 3_2 , snake_case : List[str]=PILImageResampling.BILINEAR , snake_case : bool = True , **snake_case : Tuple , ) -> None:
"""simple docstring"""
UpperCamelCase_ : int = do_resize
UpperCamelCase_ : Union[str, Any] = do_rescale
UpperCamelCase_ : Any = size_divisor
UpperCamelCase_ : str = resample
super().__init__(**snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case : np.ndarray , snake_case : int , snake_case : Optional[Any] , snake_case : Optional[ChannelDimension] = None , **snake_case : str ) -> np.ndarray:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : Dict = get_image_size(snake_case )
# Rounds the height and width down to the closest multiple of size_divisor
UpperCamelCase_ : Optional[int] = height // size_divisor * size_divisor
UpperCamelCase_ : Union[str, Any] = width // size_divisor * size_divisor
UpperCamelCase_ : Any = resize(snake_case , (new_h, new_w) , resample=snake_case , data_format=snake_case , **snake_case )
return image
def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case : np.ndarray , snake_case : float , snake_case : Optional[ChannelDimension] = None , **snake_case : Optional[int] ) -> np.ndarray:
"""simple docstring"""
return rescale(image=snake_case , scale=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , snake_case : Optional[bool] = None , snake_case : Optional[int] = None , snake_case : str=None , snake_case : Optional[bool] = None , snake_case : Optional[Union[TensorType, str]] = None , snake_case : ChannelDimension = ChannelDimension.FIRST , **snake_case : Tuple , ) -> BatchFeature:
"""simple docstring"""
UpperCamelCase_ : str = do_resize if do_resize is not None else self.do_resize
UpperCamelCase_ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase_ : str = size_divisor if size_divisor is not None else self.size_divisor
UpperCamelCase_ : int = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
UpperCamelCase_ : int = make_list_of_images(snake_case )
if not valid_images(snake_case ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
UpperCamelCase_ : Optional[int] = [to_numpy_array(snake_case ) for img in images]
if do_resize:
UpperCamelCase_ : List[str] = [self.resize(snake_case , size_divisor=snake_case , resample=snake_case ) for image in images]
if do_rescale:
UpperCamelCase_ : List[str] = [self.rescale(snake_case , scale=1 / 2_5_5 ) for image in images]
UpperCamelCase_ : Tuple = [to_channel_dimension_format(snake_case , snake_case ) for image in images]
UpperCamelCase_ : Dict = {'pixel_values': images}
return BatchFeature(data=snake_case , tensor_type=snake_case )
| 417
| 0
|
'''simple docstring'''
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> int:
return number | (1 << position)
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> int:
return number & ~(1 << position)
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> int:
return number ^ (1 << position)
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> bool:
return ((number >> position) & 1) == 1
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> int:
return int((number & (1 << position)) != 0 )
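# Worked examples for the five helpers above, in definition order (set, clear, flip, test, get);
# added for clarity, using 0-indexed bit positions:
#   set:   0b1001 | (1 << 1)        == 0b1011
#   clear: 0b1011 & ~(1 << 1)       == 0b1001
#   flip:  0b1001 ^ (1 << 3)        == 0b0001
#   test:  ((0b1001 >> 3) & 1) == 1 -> True
#   get:   int((0b1001 & (1 << 3)) != 0) == 1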
if __name__ == "__main__":
import doctest
doctest.testmod()
| 301
|
'''simple docstring'''
import unittest
from transformers import DonutProcessor
__a = "naver-clova-ix/donut-base"
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase ( self : str ):
snake_case__ : Optional[int] = DonutProcessor.from_pretrained(snake_case_ )
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : Optional[int] = {
"""name""": """John Doe""",
"""age""": """99""",
"""city""": """Atlanta""",
"""state""": """GA""",
"""zip""": """30301""",
"""phone""": """123-4567""",
"""nicknames""": [{"""nickname""": """Johnny"""}, {"""nickname""": """JD"""}],
}
snake_case__ : Any = (
"""<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"""
"""<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"""
"""<s_nicknames><s_nickname>Johnny</s_nickname>"""
"""<sep/><s_nickname>JD</s_nickname></s_nicknames>"""
)
        snake_case__ : Optional[int] = self.processor.token2json(snake_case_ )
self.assertDictEqual(snake_case_ , snake_case_ )
| 301
| 1
|
from sklearn.metrics import mean_squared_error
import datasets
UpperCAmelCase = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
UpperCAmelCase = '''\
Mean Squared Error (MSE) is the average of the squared differences between the predicted
and actual values.
'''
UpperCAmelCase = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
def SCREAMING_SNAKE_CASE__ ( self ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float' ) ),
"references": datasets.Sequence(datasets.Value('float' ) ),
}
else:
return {
"predictions": datasets.Value('float' ),
"references": datasets.Value('float' ),
}
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case=None , snake_case="uniform_average" , snake_case=True ):
lowercase = mean_squared_error(
snake_case , snake_case , sample_weight=snake_case , multioutput=snake_case , squared=snake_case )
return {"mse": mse}
| 84
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : float | Decimal , __magic_name__ : float = 10**-10 ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = a
while True:
_lowerCAmelCase :str = Decimal(__magic_name__ ) - (
Decimal(eval(__magic_name__ ) ) / Decimal(eval(str(diff(__magic_name__ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(__magic_name__ ) ) < precision: # noqa: S307
return float(__magic_name__ )
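# Added note: each pass applies the Newton-Raphson update x_{n+1} = x_n - f(x_n) / f'(x_n),
# where f' is obtained symbolically via sympy.diff and both f and f' are evaluated with eval on
# the string expression; iteration stops once |f(x_n)| falls below the requested precision
# (10**-10 by default).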
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
    # Find root of logarithmic function
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
| 687
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 198
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case ( ProcessorMixin ):
'''simple docstring'''
snake_case_ : List[str] = ["""image_processor""", """tokenizer"""]
snake_case_ : str = """ChineseCLIPImageProcessor"""
snake_case_ : Tuple = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : Any , lowerCAmelCase : Dict=None , lowerCAmelCase : List[Any]=None , **lowerCAmelCase : str) -> Optional[Any]:
"""simple docstring"""
_snake_case : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowerCAmelCase , )
_snake_case : Tuple = kwargs.pop("""feature_extractor""")
_snake_case : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""")
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""")
super().__init__(lowerCAmelCase , lowerCAmelCase)
_snake_case : Optional[int] = self.image_processor
def __call__( self : List[Any] , lowerCAmelCase : str=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[Any]=None , **lowerCAmelCase : Optional[int]) -> Optional[Any]:
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""")
if text is not None:
_snake_case : Dict = self.tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase)
if images is not None:
_snake_case : Any = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase)
if text is not None and images is not None:
_snake_case : Optional[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase) , tensor_type=lowerCAmelCase)
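    # Added note: the __call__ above mirrors CLIP-style processors; in the upstream
    # implementation the branch handling both text and images attaches the image pixel_values to
    # the tokenizer encoding before returning it, so a single BatchEncoding carries both modalities.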
def UpperCamelCase_ ( self : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : Any) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase)
def UpperCamelCase_ ( self : Tuple , *lowerCAmelCase : Tuple , **lowerCAmelCase : int) -> str:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase)
@property
def UpperCamelCase_ ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
_snake_case : Dict = self.tokenizer.model_input_names
_snake_case : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def UpperCamelCase_ ( self : str) -> Tuple:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowerCAmelCase , )
return self.image_processor_class
| 198
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Tuple =logging.get_logger(__name__)
lowerCAmelCase : List[str] ={
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__( self , state_dim=17 , act_dim=4 , hidden_size=128 , max_ep_len=4_096 , action_tanh=True , vocab_size=1 , n_positions=1_024 , n_layer=3 , n_head=1 , n_inner=None , activation_function="relu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        """Configuration class for a Decision Transformer model."""
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
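# Editor's addition: a quick illustrative instantiation (the values are arbitrary):
#   config = DecisionTransformerConfig(state_dim=11, act_dim=3, hidden_size=64)
#   assert config.n_head == 1 and config.n_positions == 1_024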
| 172
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with the Karras et al. (2022) VE sampler."""

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size=1, num_inference_steps=50, generator=None, output_type="pil", return_dict=True, **kwargs):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output, sigma_hat, sigma_prev, sample_hat, step_output.prev_sample, step_output["derivative"], )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
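# Editor's addition: a minimal usage sketch. The checkpoint id is an assumption made for
# illustration and is not taken from the original file.
#
#   pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
#   image.save("karras_ve_sample.png")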
| 155
| 0
|
def sum_of_series(first_term, common_diff, num_of_terms):
    # Formula for the sum of an arithmetic series: n / 2 * (2a + (n - 1)d)
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total

def main():
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
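    # Editor's addition: illustrative sanity checks, not part of the original file.
    # The closed form n / 2 * (2a + (n - 1)d) should agree with direct summation.
    assert sum_of_series(1, 1, 10) == sum(range(1, 11))  # 55
    assert sum_of_series(5, 3, 4) == 5 + 8 + 11 + 14  # 38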
| 721
|
from __future__ import annotations
def mean(nums: list) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
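    # Editor's addition: illustrative sanity check, not part of the original file.
    assert mean([3, 6, 9, 12, 15, 18, 21]) == 12.0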
| 486
| 0
|
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the combined, sorted contents of two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
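# Editor's addition: illustrative sanity checks, not part of the original file.
# Merged and sorted, [1, 3] + [2, 4] gives [1, 2, 3, 4], so the median is (2 + 3) / 2.
assert median_of_two_arrays([1, 3], [2, 4]) == 2.5
assert median_of_two_arrays([1, 3], [2]) == 2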
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 432
|
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price after applying the given tax rate."""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F'''{price_plus_tax(100, 0.25) = }''')
print(F'''{price_plus_tax(125.50, 0.05) = }''')
| 432
| 1
|
'''simple docstring'''
def solution(n: int = 1_000) -> int:
    """Return the sum of all multiples of 3 or 5 below `n` (Project Euler problem 1)."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 1_5 == 0:  # unreachable: any multiple of 15 is already caught above
            result -= a
        a += 1
    return result
if __name__ == "__main__":
print(f'{solution() = }')
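    # Editor's addition: illustrative checks (233168 is the well-known result for n = 1000).
    assert solution(10) == 3 + 5 + 6 + 9
    assert solution() == 233_168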
| 568
|
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
def _A ( self : List[Any] ):
'''simple docstring'''
super().setUp()
lowerCAmelCase__ : Dict = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCAmelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def _A ( self : Union[str, Any] , **a__ : str ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **a__ )
def _A ( self : Optional[int] , **a__ : str ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def _A ( self : Tuple , a__ : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = "UNwant\u00E9d,running"
lowerCAmelCase__ : Union[str, Any] = "unwanted, running"
return input_text, output_text
def _A ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.tokenizer_class(self.vocab_file )
lowerCAmelCase__ : List[str] = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(a__ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [7, 4, 5, 10, 8, 9] )
def _A ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.get_tokenizers(do_lower_case=a__ )
for tokenizer in tokenizers:
lowerCAmelCase__ : Tuple = tokenizer("UNwant\u00E9d,running" )
lowerCAmelCase__ : str = len(inputs["input_ids"] ) - 1
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len )
lowerCAmelCase__ : Optional[int] = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running" )
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len )
| 568
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _A ( metaclass=DummyObject ):
    _backends = ["transformers", "torch", "note_seq"]
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
| 359
|
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class ExampleDifferenceTests ( unittest.TestCase ):
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None ):
"""simple docstring"""
lowercase = None
lowercase = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
lowercase = os.path.abspath("""examples""" )
for item in os.listdir(__lowerCAmelCase ):
if item not in EXCLUDE_EXAMPLES:
lowercase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
if os.path.isfile(__lowerCAmelCase ) and ".py" in item_path:
with self.subTest(
tested_script=__lowerCAmelCase , feature_script=__lowerCAmelCase , tested_section="""main()""" if parser_only else """training_function()""" , ):
lowercase = compare_against_test(
os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowercase = """\n""".join(__lowerCAmelCase )
if special_strings is not None:
for string in special_strings:
lowercase = diff.replace(__lowerCAmelCase , """""" )
self.assertEqual(__lowerCAmelCase , """""" )
def A__ ( self ):
"""simple docstring"""
self.one_complete_example("""complete_nlp_example.py""" , __lowerCAmelCase )
self.one_complete_example("""complete_nlp_example.py""" , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
lowercase = [
""" """ * 16 + """{\n\n""",
""" """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 20 + """\"epoch\": epoch,\n\n""",
""" """ * 16 + """},\n\n""",
""" """ * 16 + """step=epoch,\n""",
""" """ * 12,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
self.one_complete_example("""complete_cv_example.py""" , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
self.one_complete_example("""complete_cv_example.py""" , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class FeatureExamplesTests ( TempDirTestCase ):
snake_case__ : Any = False
@classmethod
def A__ ( cls ):
"""simple docstring"""
super().setUpClass()
lowercase = tempfile.mkdtemp()
lowercase = os.path.join(cls._tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
lowercase = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def A__ ( cls ):
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def A__ ( self ):
"""simple docstring"""
lowercase = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )
def A__ ( self ):
"""simple docstring"""
lowercase = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
lowercase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )
def A__ ( self ):
"""simple docstring"""
lowercase = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split()
lowercase = run_command(self._launch_args + testargs , return_stdout=__lowerCAmelCase )
self.assertNotIn("""epoch 0:""" , __lowerCAmelCase )
self.assertIn("""epoch 1:""" , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split()
lowercase = run_command(self._launch_args + testargs , return_stdout=__lowerCAmelCase )
if torch.cuda.is_available():
lowercase = torch.cuda.device_count()
else:
lowercase = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""" , __lowerCAmelCase )
self.assertIn("""epoch 1:""" , __lowerCAmelCase )
else:
self.assertIn("""epoch 0:""" , __lowerCAmelCase )
self.assertIn("""epoch 1:""" , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = """
examples/by_feature/cross_validation.py
--num_folds 2
""".split()
with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
lowercase = run_command(self._launch_args + testargs , return_stdout=__lowerCAmelCase )
lowercase = re.findall("""({.+})""" , __lowerCAmelCase )
lowercase = [r for r in results if """accuracy""" in r][-1]
lowercase = ast.literal_eval(__lowerCAmelCase )
self.assertGreaterEqual(results["""accuracy"""] , 0.7_5 )
def A__ ( self ):
"""simple docstring"""
lowercase = ["""examples/by_feature/multi_process_metrics.py"""]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def A__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
lowercase = f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , """tracking""" ) ) )
def A__ ( self ):
"""simple docstring"""
lowercase = ["""examples/by_feature/gradient_accumulation.py"""]
run_command(self._launch_args + testargs )
def A__ ( self ):
"""simple docstring"""
lowercase = ["""examples/by_feature/local_sgd.py"""]
run_command(self._launch_args + testargs )
| 359
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester :
def __init__( self , __magic_name__ , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = 1_3
_lowerCAmelCase = 7
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = 9_9
_lowerCAmelCase = 3_2
_lowerCAmelCase = 2
_lowerCAmelCase = 4
_lowerCAmelCase = 3_7
_lowerCAmelCase = 'gelu'
_lowerCAmelCase = 0.1
_lowerCAmelCase = 0.1
_lowerCAmelCase = 5_1_2
_lowerCAmelCase = 1_6
_lowerCAmelCase = 2
_lowerCAmelCase = 0.02
_lowerCAmelCase = 3
_lowerCAmelCase = 4
_lowerCAmelCase = None
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = TFDistilBertModel(config=__magic_name__ )
_lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
_lowerCAmelCase = model(__magic_name__ )
_lowerCAmelCase = [input_ids, input_mask]
_lowerCAmelCase = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = TFDistilBertForMaskedLM(config=__magic_name__ )
_lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
_lowerCAmelCase = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = TFDistilBertForQuestionAnswering(config=__magic_name__ )
_lowerCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
}
_lowerCAmelCase = model(__magic_name__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = TFDistilBertForSequenceClassification(__magic_name__ )
_lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
_lowerCAmelCase = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = self.num_choices
_lowerCAmelCase = TFDistilBertForMultipleChoice(__magic_name__ )
_lowerCAmelCase = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
}
_lowerCAmelCase = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = TFDistilBertForTokenClassification(__magic_name__ )
_lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
_lowerCAmelCase = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = config_and_inputs
_lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFDistilBertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase : str = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
UpperCamelCase : List[str] = (
{
"feature-extraction": TFDistilBertModel,
"fill-mask": TFDistilBertForMaskedLM,
"question-answering": TFDistilBertForQuestionAnswering,
"text-classification": TFDistilBertForSequenceClassification,
"token-classification": TFDistilBertForTokenClassification,
"zero-shot": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase : Union[str, Any] = False
UpperCamelCase : str = False
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = TFDistilBertModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=__magic_name__ , dim=3_7 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__magic_name__ )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
_lowerCAmelCase = TFDistilBertModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_tf
class TFDistilBertModelIntegrationTest ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = TFDistilBertModel.from_pretrained('distilbert-base-uncased' )
_lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_lowerCAmelCase = model(__magic_name__ )[0]
_lowerCAmelCase = [1, 6, 7_6_8]
self.assertEqual(output.shape , __magic_name__ )
_lowerCAmelCase = tf.constant(
[
[
[0.19_26_18_85, -0.13_73_29_55, 0.4_11_97_99],
[0.22_15_01_56, -0.07_42_26_61, 0.39_03_72_04],
[0.22_75_60_18, -0.0_89_64_14, 0.3_70_14_67],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __magic_name__ , atol=1e-4 )
| 309
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __magic_name__ ( unittest.TestCase ):
def _lowerCamelCase ( self , __magic_name__ ):
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
_lowerCAmelCase = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sgugger/tiny-distilbert-classification'
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , only_pretrain_model=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , torchscript=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , fpaa=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = AutoConfig.from_pretrained(__magic_name__ )
# set architectures equal to `None`
_lowerCAmelCase = None
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ , configs=[config] )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__magic_name__ , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = AutoConfig.from_pretrained(__magic_name__ )
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ , configs=[config] )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tinier_bart'
_lowerCAmelCase = AutoConfig.from_pretrained(__magic_name__ )
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ , configs=[config] )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = AutoConfig.from_pretrained(__magic_name__ )
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ , configs=[config] )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tinier_bart'
_lowerCAmelCase = AutoConfig.from_pretrained(__magic_name__ )
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ , configs=[config] )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , save_to_csv=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__magic_name__ , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(__magic_name__ , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(__magic_name__ , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(__magic_name__ , 'train_time.csv' ) , env_info_csv_file=os.path.join(__magic_name__ , 'env.csv' ) , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ )
benchmark.run()
self.assertTrue(Path(os.path.join(__magic_name__ , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , 'env.csv' ) ).exists() )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__magic_name__ ):
self.assertTrue(hasattr(__magic_name__ , 'sequential' ) )
self.assertTrue(hasattr(__magic_name__ , 'cumulative' ) )
self.assertTrue(hasattr(__magic_name__ , 'current' ) )
self.assertTrue(hasattr(__magic_name__ , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__magic_name__ , 'log.txt' ) , log_print=__magic_name__ , trace_memory_line_by_line=__magic_name__ , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ )
_lowerCAmelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(__magic_name__ , 'log.txt' ) ).exists() )
| 309
| 1
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline ( DiffusionPipeline ):
def __init__( self: Any , _lowerCAmelCase: WhisperForConditionalGeneration , _lowerCAmelCase: WhisperProcessor , _lowerCAmelCase: AutoencoderKL , _lowerCAmelCase: CLIPTextModel , _lowerCAmelCase: CLIPTokenizer , _lowerCAmelCase: UNetaDConditionModel , _lowerCAmelCase: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _lowerCAmelCase: StableDiffusionSafetyChecker , _lowerCAmelCase: CLIPImageProcessor , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=_lowerCAmelCase , speech_processor=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , )
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: Optional[Union[str, int]] = "auto" ) -> Optional[int]:
'''simple docstring'''
if slice_size == "auto":
UpperCAmelCase_ =self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Dict:
'''simple docstring'''
self.enable_attention_slicing(_lowerCAmelCase )
@torch.no_grad()
def __call__( self: str , _lowerCAmelCase: Dict , _lowerCAmelCase: Optional[int]=1_6000 , _lowerCAmelCase: int = 512 , _lowerCAmelCase: int = 512 , _lowerCAmelCase: int = 50 , _lowerCAmelCase: float = 7.5 , _lowerCAmelCase: Optional[Union[str, List[str]]] = None , _lowerCAmelCase: Optional[int] = 1 , _lowerCAmelCase: float = 0.0 , _lowerCAmelCase: Optional[torch.Generator] = None , _lowerCAmelCase: Optional[torch.FloatTensor] = None , _lowerCAmelCase: Optional[str] = "pil" , _lowerCAmelCase: bool = True , _lowerCAmelCase: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _lowerCAmelCase: int = 1 , **_lowerCAmelCase: int , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =self.speech_processor.feature_extractor(
_lowerCAmelCase , return_tensors="pt" , sampling_rate=_lowerCAmelCase ).input_features.to(self.device )
UpperCAmelCase_ =self.speech_model.generate(_lowerCAmelCase , max_length=48_0000 )
UpperCAmelCase_ =self.speech_processor.tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , normalize=_lowerCAmelCase )[
0
]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase_ =1
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase_ =len(_lowerCAmelCase )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(_lowerCAmelCase )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(_lowerCAmelCase )}.' )
# get prompt text embeddings
UpperCAmelCase_ =self.tokenizer(
_lowerCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
UpperCAmelCase_ =text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase_ =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
UpperCAmelCase_ =text_input_ids[:, : self.tokenizer.model_max_length]
UpperCAmelCase_ =self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =text_embeddings.shape
UpperCAmelCase_ =text_embeddings.repeat(1 , _lowerCAmelCase , 1 )
UpperCAmelCase_ =text_embeddings.view(bs_embed * num_images_per_prompt , _lowerCAmelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCAmelCase_ =guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase_ =42
if negative_prompt is None:
UpperCAmelCase_ =[""] * batch_size
elif type(_lowerCAmelCase ) is not type(_lowerCAmelCase ):
raise TypeError(
F'`negative_prompt` should be the same type to `prompt`, but got {type(_lowerCAmelCase )} !='
F' {type(_lowerCAmelCase )}.' )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase_ =[negative_prompt]
elif batch_size != len(_lowerCAmelCase ):
raise ValueError(
F'`negative_prompt`: {negative_prompt} has batch size {len(_lowerCAmelCase )}, but `prompt`:'
F' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
" the batch size of `prompt`." )
else:
UpperCAmelCase_ =negative_prompt
UpperCAmelCase_ =text_input_ids.shape[-1]
UpperCAmelCase_ =self.tokenizer(
_lowerCAmelCase , padding="max_length" , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors="pt" , )
UpperCAmelCase_ =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase_ =uncond_embeddings.shape[1]
UpperCAmelCase_ =uncond_embeddings.repeat(1 , _lowerCAmelCase , 1 )
UpperCAmelCase_ =uncond_embeddings.view(batch_size * num_images_per_prompt , _lowerCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase_ =torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCAmelCase_ =(batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCAmelCase_ =text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCAmelCase_ =torch.randn(_lowerCAmelCase , generator=_lowerCAmelCase , device="cpu" , dtype=_lowerCAmelCase ).to(
self.device )
else:
UpperCAmelCase_ =torch.randn(_lowerCAmelCase , generator=_lowerCAmelCase , device=self.device , dtype=_lowerCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
UpperCAmelCase_ =latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_lowerCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCAmelCase_ =self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase_ =latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase_ ="eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase_ ={}
if accepts_eta:
UpperCAmelCase_ =eta
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ =self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
# predict the noise residual
UpperCAmelCase_ =self.unet(_lowerCAmelCase , _lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ =noise_pred.chunk(2 )
UpperCAmelCase_ =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ =self.scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =1 / 0.1_82_15 * latents
UpperCAmelCase_ =self.vae.decode(_lowerCAmelCase ).sample
UpperCAmelCase_ =(image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCAmelCase_ =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ =self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_lowerCAmelCase , nsfw_content_detected=_lowerCAmelCase )
| 54
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __lowercase (_SCREAMING_SNAKE_CASE :Tuple ):
if is_torch_version('''<''' , '''2.0.0''' ) or not hasattr(_SCREAMING_SNAKE_CASE , '''_dynamo''' ):
return False
return isinstance(_SCREAMING_SNAKE_CASE , torch._dynamo.eval_frame.OptimizedModule )
def __lowercase (_SCREAMING_SNAKE_CASE :Tuple , _SCREAMING_SNAKE_CASE :bool = True ):
SCREAMING_SNAKE_CASE : Any = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
SCREAMING_SNAKE_CASE : Any = is_compiled_module(_SCREAMING_SNAKE_CASE )
if is_compiled:
SCREAMING_SNAKE_CASE : int = model
SCREAMING_SNAKE_CASE : Tuple = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : Any = model.module
if not keep_fpaa_wrapper:
SCREAMING_SNAKE_CASE : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , '''forward''' )
SCREAMING_SNAKE_CASE : Tuple = model.__dict__.pop('''_original_forward''' , _SCREAMING_SNAKE_CASE )
if original_forward is not None:
while hasattr(_SCREAMING_SNAKE_CASE , '''__wrapped__''' ):
SCREAMING_SNAKE_CASE : Optional[Any] = forward.__wrapped__
if forward == original_forward:
break
SCREAMING_SNAKE_CASE : Tuple = forward
if getattr(_SCREAMING_SNAKE_CASE , '''_converted_to_transformer_engine''' , _SCREAMING_SNAKE_CASE ):
convert_model(_SCREAMING_SNAKE_CASE , to_transformer_engine=_SCREAMING_SNAKE_CASE )
if is_compiled:
SCREAMING_SNAKE_CASE : List[Any] = model
SCREAMING_SNAKE_CASE : int = compiled_model
return model
def __lowercase ():
PartialState().wait_for_everyone()
def __lowercase (_SCREAMING_SNAKE_CASE :str , _SCREAMING_SNAKE_CASE :Union[str, Any] ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif PartialState().local_process_index == 0:
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@contextmanager
def __lowercase (**_SCREAMING_SNAKE_CASE :str ):
for key, value in kwargs.items():
SCREAMING_SNAKE_CASE : str = str(_SCREAMING_SNAKE_CASE )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __lowercase (_SCREAMING_SNAKE_CASE :List[Any] ):
if not hasattr(_SCREAMING_SNAKE_CASE , '''__qualname__''' ) and not hasattr(_SCREAMING_SNAKE_CASE , '''__name__''' ):
SCREAMING_SNAKE_CASE : str = getattr(_SCREAMING_SNAKE_CASE , '''__class__''' , _SCREAMING_SNAKE_CASE )
if hasattr(_SCREAMING_SNAKE_CASE , '''__qualname__''' ):
return obj.__qualname__
if hasattr(_SCREAMING_SNAKE_CASE , '''__name__''' ):
return obj.__name__
return str(_SCREAMING_SNAKE_CASE )
def __lowercase (_SCREAMING_SNAKE_CASE :Optional[Any] , _SCREAMING_SNAKE_CASE :str ):
for key, value in source.items():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : int = destination.setdefault(_SCREAMING_SNAKE_CASE , {} )
merge_dicts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE : Dict = value
return destination
def __lowercase (_SCREAMING_SNAKE_CASE :int = None ):
if port is None:
SCREAMING_SNAKE_CASE : Dict = 2_95_00
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('''localhost''', port) ) == 0
| 507
| 0
|
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
_UpperCAmelCase : List[str] =R"""\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n"""
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
@add_start_docstrings(__lowerCAmelCase )
def __call__( self , __lowercase , __lowercase , **__lowercase ) -> Union[str, Any]:
raise NotImplementedError('''StoppingCriteria needs to be subclassed''' )
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase = None ) -> str:
lowerCAmelCase_ : Dict = max_length
lowerCAmelCase_ : int = max_position_embeddings
@add_start_docstrings(__lowerCAmelCase )
def __call__( self , __lowercase , __lowercase , **__lowercase ) -> Optional[Any]:
lowerCAmelCase_ : int = input_ids.shape[-1]
lowerCAmelCase_ : Tuple = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
'''This is a friendly reminder - the current text generation call will exceed the model\'s predefined '''
f"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
'''exceptions, performance degradation, or nothing at all.''' )
return is_done
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase ) -> int:
warnings.warn(
'''The class `MaxNewTokensCriteria` is deprecated. '''
f"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
'''with `max_length = start_length + max_new_tokens` instead.''' , __lowerCAmelCase , )
lowerCAmelCase_ : Any = start_length
lowerCAmelCase_ : Dict = max_new_tokens
lowerCAmelCase_ : Union[str, Any] = start_length + max_new_tokens
@add_start_docstrings(__lowerCAmelCase )
def __call__( self , __lowercase , __lowercase , **__lowercase ) -> Tuple:
return input_ids.shape[-1] >= self.max_length
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase = None ) -> Any:
lowerCAmelCase_ : Dict = max_time
lowerCAmelCase_ : Optional[int] = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(__lowerCAmelCase )
def __call__( self , __lowercase , __lowercase , **__lowercase ) -> Dict:
return time.time() - self.initial_timestamp > self.max_time
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
@add_start_docstrings(__lowerCAmelCase )
def __call__( self , __lowercase , __lowercase , **__lowercase ) -> Tuple:
return any(criteria(__lowerCAmelCase , __lowerCAmelCase ) for criteria in self )
@property
def lowercase_ ( self ) -> Dict:
for stopping_criterium in self:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return stopping_criterium.max_length
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return stopping_criterium.max_length
return None
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> StoppingCriteriaList:
lowerCAmelCase_ : List[Any] = stopping_criteria.max_length
lowerCAmelCase_ : List[Any] = deepcopy(__snake_case )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn('''You set different `max_length` for stopping criteria and `max_length` parameter''' , __snake_case )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=__snake_case ) )
return new_stopping_criteria
| 707
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=2 , __lowercase=True , __lowercase=False , __lowercase=1_0 , __lowercase=3 , __lowercase=3_2 * 4 , __lowercase=3_2 * 6 , __lowercase=4 , __lowercase=3_2 , ) -> Union[str, Any]:
lowerCAmelCase_ : str = parent
lowerCAmelCase_ : Optional[Any] = batch_size
lowerCAmelCase_ : List[Any] = is_training
lowerCAmelCase_ : Optional[Any] = use_auxiliary_loss
lowerCAmelCase_ : List[Any] = num_queries
lowerCAmelCase_ : str = num_channels
lowerCAmelCase_ : Dict = min_size
lowerCAmelCase_ : List[str] = max_size
lowerCAmelCase_ : Any = num_labels
lowerCAmelCase_ : str = mask_feature_size
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__lowercase )
lowerCAmelCase_ : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowercase )
lowerCAmelCase_ : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowercase ) > 0.5
).float()
lowerCAmelCase_ : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=__lowercase ) > 0.5).long()
lowerCAmelCase_ : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase_ ( self ) -> List[str]:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = self.prepare_config_and_inputs()
lowerCAmelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def lowercase_ ( self , __lowercase , __lowercase ) -> Any:
lowerCAmelCase_ : Optional[int] = output.encoder_hidden_states
lowerCAmelCase_ : List[Any] = output.pixel_decoder_hidden_states
lowerCAmelCase_ : Optional[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowercase ) , config.decoder_config.decoder_layers )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase=False ) -> int:
with torch.no_grad():
lowerCAmelCase_ : List[Any] = MaskFormerModel(config=__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : Optional[Any] = model(pixel_values=__lowercase , pixel_mask=__lowercase )
lowerCAmelCase_ : Optional[int] = model(__lowercase , output_hidden_states=__lowercase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__lowercase , __lowercase )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
lowerCAmelCase_ : Any = MaskFormerForInstanceSegmentation(config=__lowercase )
model.to(__lowercase )
model.eval()
def comm_check_on_output(__lowercase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCAmelCase_ : int = model(pixel_values=__lowercase , pixel_mask=__lowercase )
lowerCAmelCase_ : Any = model(__lowercase )
comm_check_on_output(__lowercase )
lowerCAmelCase_ : List[Any] = model(
pixel_values=__lowercase , pixel_mask=__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
comm_check_on_output(__lowercase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : Tuple = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : Dict = False
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : List[str] = False
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Any = MaskFormerModelTester(self )
lowerCAmelCase_ : str = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def lowercase_ ( self ) -> Any:
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ , lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowercase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def lowercase_ ( self ) -> str:
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def lowercase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def lowercase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def lowercase_ ( self ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowercase_ ( self ) -> Optional[Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase_ ( self ) -> Dict:
pass
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Tuple = model_class(__lowercase )
lowerCAmelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : str = [*signature.parameters.keys()]
lowerCAmelCase_ : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
@slow
def lowercase_ ( self ) -> Optional[int]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowerCAmelCase_ : str = MaskFormerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Tuple = (self.model_tester.min_size,) * 2
lowerCAmelCase_ : List[Any] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=__lowercase ),
'''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowercase ),
'''class_labels''': torch.zeros(2 , 1_0 , device=__lowercase ).long(),
}
lowerCAmelCase_ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowercase )
lowerCAmelCase_ : Dict = model(**__lowercase )
self.assertTrue(outputs.loss is not None )
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )
def lowercase_ ( self ) -> int:
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : List[str] = model_class(__lowercase ).to(__lowercase )
lowerCAmelCase_ : int = model(**__lowercase , output_attentions=__lowercase )
self.assertTrue(outputs.attentions is not None )
def lowercase_ ( self ) -> List[str]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowerCAmelCase_ : int = self.all_model_classes[1]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase_ : Optional[Any] = model_class(__lowercase )
model.to(__lowercase )
model.train()
lowerCAmelCase_ : Optional[Any] = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ).loss
loss.backward()
def lowercase_ ( self ) -> Optional[int]:
# only MaskFormerForInstanceSegmentation has the loss
lowerCAmelCase_ : Any = self.all_model_classes[1]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase_ : Tuple = True
lowerCAmelCase_ : Tuple = True
lowerCAmelCase_ : Any = model_class(__lowercase )
model.to(__lowercase )
model.train()
lowerCAmelCase_ : Any = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
lowerCAmelCase_ : Union[str, Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase_ : str = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation does not
lowerCAmelCase_ : str = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase_ : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__lowercase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_UpperCAmelCase : Dict =1E-4
def lowerCAmelCase ( )-> Any:
lowerCAmelCase_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class snake_case__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase_ ( self ) -> Union[str, Any]:
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def lowercase_ ( self ) -> Any:
lowerCAmelCase_ : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowercase )
lowerCAmelCase_ : Dict = self.default_image_processor
lowerCAmelCase_ : int = prepare_img()
lowerCAmelCase_ : Any = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
lowerCAmelCase_ : Any = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
lowerCAmelCase_ : List[str] = model(**__lowercase )
lowerCAmelCase_ : Union[str, Any] = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
lowerCAmelCase_ : List[Any] = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
lowerCAmelCase_ : int = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) )
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowercase )
.eval()
)
lowerCAmelCase_ : Tuple = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : int = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
lowerCAmelCase_ : Tuple = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
lowerCAmelCase_ : Dict = model(**__lowercase )
# masks_queries_logits
lowerCAmelCase_ : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCAmelCase_ : Tuple = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
lowerCAmelCase_ : int = torch.tensor(__lowercase ).to(__lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
# class_queries_logits
lowerCAmelCase_ : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase_ : Dict = torch.tensor(
[
[1.6_512e00, -5.2_572e00, -3.3_519e00],
[3.6_169e-02, -5.9_025e00, -2.9_313e00],
[1.0_766e-04, -7.7_630e00, -5.1_263e00],
] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : str = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(__lowercase )
.eval()
)
lowerCAmelCase_ : int = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : Dict = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
lowerCAmelCase_ : str = model(**__lowercase )
# masks_queries_logits
lowerCAmelCase_ : List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCAmelCase_ : Any = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
lowerCAmelCase_ : str = torch.tensor(__lowercase ).to(__lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
# class_queries_logits
lowerCAmelCase_ : Optional[int] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase_ : int = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowercase )
.eval()
)
lowerCAmelCase_ : str = self.default_image_processor
lowerCAmelCase_ : Union[str, Any] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].to(__lowercase )
lowerCAmelCase_ : int = [el.to(__lowercase ) for el in inputs['''mask_labels''']]
lowerCAmelCase_ : Optional[Any] = [el.to(__lowercase ) for el in inputs['''class_labels''']]
with torch.no_grad():
lowerCAmelCase_ : str = model(**__lowercase )
self.assertTrue(outputs.loss is not None )
| 619
| 0
|
"""simple docstring"""
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    """Recursively count the ways needed_sum can be written as a sum of distinct powers."""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count
    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count
def solve(needed_sum: int, power: int) -> int:
    """Return how many ways needed_sum can be written as a sum of distinct natural-number powers."""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
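    # Illustrative check (added; not part of the original snippet): with power=2 the only way
    # to write 13 as a sum of distinct squares is 4 + 9, so solve(13, 2) should return 1.
    assert solve(13, 2) == 1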
| 177
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = """poolformer"""
def __init__( self , __lowerCamelCase=3 , __lowerCamelCase=16 , __lowerCamelCase=16 , __lowerCamelCase=3 , __lowerCamelCase=4.0 , __lowerCamelCase=[2, 2, 6, 2] , __lowerCamelCase=[64, 128, 320, 512] , __lowerCamelCase=[7, 3, 3, 3] , __lowerCamelCase=[4, 2, 2, 2] , __lowerCamelCase=[2, 1, 1, 1] , __lowerCamelCase=4 , __lowerCamelCase=0.0 , __lowerCamelCase="gelu" , __lowerCamelCase=True , __lowerCamelCase=1e-5 , __lowerCamelCase=0.0_2 , **__lowerCamelCase , ):
'''simple docstring'''
__A : str = num_channels
__A : List[str] = patch_size
__A : str = stride
__A : Any = padding
__A : Any = pool_size
__A : Dict = hidden_sizes
__A : Optional[Any] = mlp_ratio
__A : Any = depths
__A : List[str] = patch_sizes
__A : Union[str, Any] = strides
__A : List[str] = num_encoder_blocks
__A : Optional[int] = drop_path_rate
__A : Union[str, Any] = hidden_act
__A : Optional[Any] = use_layer_scale
__A : List[Any] = layer_scale_init_value
__A : List[Any] = initializer_range
super().__init__(**__lowerCamelCase )
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = version.parse("""1.11""" )
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return 2e-3
| 177
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
a : Tuple = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
a : Any = 25_0004
a : Dict = 25_0020
@require_sentencepiece
@require_tokenizers
class a ( _lowerCamelCase , unittest.TestCase ):
snake_case_ = MBartTokenizer
snake_case_ = MBartTokenizerFast
snake_case_ = True
snake_case_ = True
def A_ ( self : List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = MBartTokenizer(lowercase_ , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self : str ):
snake_case_ = MBartTokenizer(lowercase_ , keep_accents=lowercase_ )
snake_case_ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowercase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
snake_case_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
snake_case_ = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case_ = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def A_ ( self : Optional[Any] ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case_ = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case_ = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
snake_case_ = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
snake_case_ = tempfile.mkdtemp()
snake_case_ = tokenizer_r.save_pretrained(lowercase_ )
snake_case_ = tokenizer_p.save_pretrained(lowercase_ )
# Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
snake_case_ = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
snake_case_ = tokenizer_r.from_pretrained(lowercase_ )
snake_case_ = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=True
snake_case_ = tempfile.mkdtemp()
snake_case_ = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
snake_case_ = tokenizer_p.save_pretrained(lowercase_ )
# Checks it saves with the same files
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
snake_case_ = tokenizer_r.from_pretrained(lowercase_ )
snake_case_ = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=False
snake_case_ = tempfile.mkdtemp()
snake_case_ = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
snake_case_ = tokenizer_p.save_pretrained(lowercase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case_ = tokenizer_r.from_pretrained(lowercase_ )
snake_case_ = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
snake_case_ = "facebook/mbart-large-en-ro"
snake_case_ = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
snake_case_ = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
snake_case_ = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def A_ ( cls : Tuple ):
snake_case_ = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
snake_case_ = 1
return cls
def A_ ( self : Tuple ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_0020 )
def A_ ( self : Optional[int] ):
snake_case_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowercase_ )
def A_ ( self : str ):
self.assertIn(lowercase_ , self.tokenizer.all_special_ids )
snake_case_ = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
snake_case_ = self.tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_ )
snake_case_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertNotIn(self.tokenizer.eos_token , lowercase_ )
def A_ ( self : Optional[int] ):
snake_case_ = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , lowercase_ )
snake_case_ = 10
snake_case_ = self.tokenizer(lowercase_ , max_length=lowercase_ , truncation=lowercase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowercase_ )
self.assertEqual(len(lowercase_ ) , lowercase_ )
def A_ ( self : Any ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_0026, 25_0001] )
def A_ ( self : List[str] ):
snake_case_ = tempfile.mkdtemp()
snake_case_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowercase_ )
snake_case_ = MBartTokenizer.from_pretrained(lowercase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowercase_ )
@require_torch
def A_ ( self : Union[str, Any] ):
snake_case_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowercase_ , return_tensors='''pt''' )
snake_case_ = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
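    # Note (added for clarity; not part of the original test): MBart's shift_tokens_right wraps
    # the final non-pad token (the target language code) around to position 0, which is why the
    # labels end with [eos, RO_CODE] while decoder_input_ids start with RO_CODE and end with eos.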
@require_torch
def A_ ( self : Optional[Any] ):
snake_case_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
snake_case_ = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
snake_case_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowercase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def A_ ( self : List[str] ):
snake_case_ = self.tokenizer(self.src_text , padding=lowercase_ , truncation=lowercase_ , max_length=3 , return_tensors='''pt''' )
snake_case_ = self.tokenizer(
text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=10 , return_tensors='''pt''' )
snake_case_ = targets['''input_ids''']
snake_case_ = shift_tokens_right(lowercase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def A_ ( self : Any ):
snake_case_ = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowercase_ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[62, 3034, 2, 25_0004]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_0001,
} , )
| 714
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
a : Dict = logging.get_logger(__name__)
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase=False ) -> int:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
snake_case_ = os.path.abspath(__UpperCAmelCase )
logger.info(F"Loading PyTorch weights from {pt_path}" )
snake_case_ = torch.load(__UpperCAmelCase, map_location='''cpu''' )
logger.info(F"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters." )
snake_case_ = convert_pytorch_state_dict_to_flax(__UpperCAmelCase, __UpperCAmelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
snake_case_ = convert_pytorch_sharded_state_dict_to_flax(__UpperCAmelCase, __UpperCAmelCase )
return flax_state_dict
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, ) -> (Tuple[str], np.ndarray):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(__UpperCAmelCase ) -> bool:
return len(set(__UpperCAmelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
snake_case_ = pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__UpperCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
snake_case_ = pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__UpperCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
snake_case_ = pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__UpperCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
snake_case_ = pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__UpperCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
snake_case_ = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__UpperCAmelCase ):
snake_case_ = pt_tensor.transpose(2, 3, 1, 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
snake_case_ = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__UpperCAmelCase ):
snake_case_ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
snake_case_ = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
snake_case_ = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
snake_case_ = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
snake_case_ = pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
snake_case_ = pt_tuple_key[-2] + '''_v'''
if name is not None:
snake_case_ = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
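# Illustrative note (added; not part of the original module): the conv-layer branch above maps
# PyTorch's (out_channels, in_channels, kH, kW) kernel layout to the (kH, kW, in_channels,
# out_channels) layout Flax expects. A minimal sketch of that transpose:
#
#   import numpy as np
#   pt_kernel = np.zeros((8, 3, 5, 5))               # PyTorch Conv2d weight
#   flax_kernel = pt_kernel.transpose(2, 3, 1, 0)    # shape (5, 5, 3, 8), as Flax stores it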
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
snake_case_ = {k: v.numpy() for k, v in pt_state_dict.items()}
snake_case_ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
snake_case_ = flax_model.params['''params''']
else:
snake_case_ = flax_model.params
snake_case_ = flatten_dict(__UpperCAmelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
snake_case_ = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(__UpperCAmelCase )
snake_case_ = {}
snake_case_ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
snake_case_ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
snake_case_ = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
snake_case_ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
snake_case_ = pt_tuple_key[1:]
# Correctly rename weight parameters
snake_case_ ,snake_case_ = rename_key_and_reshape_tensor(
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
# add model prefix if necessary
snake_case_ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
snake_case_ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
snake_case_ = jnp.asarray(__UpperCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__UpperCAmelCase, __UpperCAmelCase )
continue
# also add unexpected weight so that warning is thrown
snake_case_ = jnp.asarray(__UpperCAmelCase )
else:
# also add unexpected weight so that warning is thrown
snake_case_ = jnp.asarray(__UpperCAmelCase )
return unflatten_dict(__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
import torch
# Load the index
snake_case_ = {}
for shard_file in shard_filenames:
# load using msgpack utils
snake_case_ = torch.load(__UpperCAmelCase )
snake_case_ = {k: v.numpy() for k, v in pt_state_dict.items()}
snake_case_ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
snake_case_ = flax_model.params['''params''']
snake_case_ = flatten_dict(__UpperCAmelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
snake_case_ = flax_model.params
snake_case_ = flatten_dict(__UpperCAmelCase )
snake_case_ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
snake_case_ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
snake_case_ = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
snake_case_ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
snake_case_ = pt_tuple_key[1:]
# Correctly rename weight parameters
snake_case_ ,snake_case_ = rename_key_and_reshape_tensor(
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
# add model prefix if necessary
snake_case_ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
snake_case_ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
snake_case_ = jnp.asarray(__UpperCAmelCase )
continue
if "var" in flax_key[-1]:
snake_case_ = jnp.asarray(__UpperCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__UpperCAmelCase, __UpperCAmelCase )
continue
# also add unexpected weight so that warning is thrown
snake_case_ = jnp.asarray(__UpperCAmelCase )
else:
# also add unexpected weight so that warning is thrown
snake_case_ = jnp.asarray(__UpperCAmelCase )
return unflatten_dict(__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Any:
'''simple docstring'''
snake_case_ = os.path.abspath(__UpperCAmelCase )
logger.info(F"Loading Flax weights from {flax_checkpoint_path}" )
# import correct flax class
snake_case_ = getattr(__UpperCAmelCase, '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(__UpperCAmelCase, '''rb''' ) as state_f:
try:
snake_case_ = from_bytes(__UpperCAmelCase, state_f.read() )
except UnpicklingError:
raise EnvironmentError(F"Unable to convert {flax_checkpoint_path} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(__UpperCAmelCase, __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Dict:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
snake_case_ = flatten_dict(jax.tree_util.tree_map(lambda __UpperCAmelCase : x.dtype == jnp.bfloataa, __UpperCAmelCase ) ).values()
if any(__UpperCAmelCase ):
# convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
snake_case_ = jax.tree_util.tree_map(
lambda __UpperCAmelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params, __UpperCAmelCase )
snake_case_ = flatten_dict(__UpperCAmelCase )
snake_case_ = pt_model.state_dict()
snake_case_ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
snake_case_ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
snake_case_ = []
snake_case_ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
snake_case_ = flax_key_tuple[0] == pt_model.base_model_prefix
snake_case_ = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
snake_case_ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
snake_case_ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__UpperCAmelCase ) not in pt_model_dict:
# conv layer
snake_case_ = flax_key_tuple[:-1] + ('''weight''',)
snake_case_ = jnp.transpose(__UpperCAmelCase, (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__UpperCAmelCase ) not in pt_model_dict:
# linear layer
snake_case_ = flax_key_tuple[:-1] + ('''weight''',)
snake_case_ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
snake_case_ = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
snake_case_ = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
snake_case_ = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
snake_case_ = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
snake_case_ = '''.'''.join(__UpperCAmelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
snake_case_ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
snake_case_ = key.split('''.''' )
snake_case_ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
snake_case_ = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
snake_case_ = key_components[-2] + '''_v'''
if name is not None:
snake_case_ = key_components[:-3] + [name]
snake_case_ = '''.'''.join(__UpperCAmelCase )
snake_case_ = key
if flax_key in special_pt_names:
snake_case_ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
snake_case_ = np.asarray(__UpperCAmelCase ) if not isinstance(__UpperCAmelCase, np.ndarray ) else flax_tensor
snake_case_ = torch.from_numpy(__UpperCAmelCase )
# remove from missing keys
missing_keys.remove(__UpperCAmelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__UpperCAmelCase )
pt_model.load_state_dict(__UpperCAmelCase )
# re-transform missing_keys to list
snake_case_ = list(__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(F"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n" )
if len(__UpperCAmelCase ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
''' use it for predictions and inference.''' )
else:
logger.warning(
F"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
'''If your task is similar to the task the model of the checkpoint was trained on, '''
F"you can already use {pt_model.__class__.__name__} for predictions without further training." )
return pt_model
| 593
| 0
|
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase : Dict = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowercase_ = BartphoTokenizer
lowercase_ = False
lowercase_ = True
def __UpperCamelCase ( self ):
'''simple docstring'''
super().setUp()
__A =['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
__A =dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
__A ={'''unk_token''': '''<unk>'''}
__A =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] )
with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(f'''{token} {vocab_tokens[token]}\n''' )
__A =BartphoTokenizer(lowercase__ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self , **lowercase__ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def __UpperCamelCase ( self , lowercase__ ):
'''simple docstring'''
__A ='''This is a là test'''
__A ='''This is a<unk><unk> test'''
return input_text, output_text
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =BartphoTokenizer(lowercase__ , self.monolingual_vocab_file , **self.special_tokens_map )
__A ='''This is a là test'''
__A ='''▁This ▁is ▁a ▁l à ▁t est'''.split()
__A =tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
__A =tokens + [tokenizer.unk_token]
__A =[4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
| 184
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCamelCase : Tuple = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
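# Note (added for clarity; not part of the original file): with the `_LazyModule` pattern above,
# importing the package itself is cheap; the torch-backed submodules are only imported the first
# time one of the listed attributes is accessed, e.g. (sketch, assuming transformers is installed):
#
#   from transformers.models.squeezebert import SqueezeBertConfig  # resolved lazily on first access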
| 184
| 1
|
'''simple docstring'''
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours among num_picked balls drawn without replacement."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
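# Note (added for clarity; not part of the original file): solution() relies on linearity of
# expectation: E[#distinct colours] = NUM_COLOURS * P(a fixed colour appears at least once),
# and P(a fixed colour is entirely missing) = C(NUM_BALLS - BALLS_PER_COLOUR, 20) / C(NUM_BALLS, 20),
# i.e. C(60, 20) / C(70, 20) with the constants above.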
| 617
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
_A: str = """Create a default config file for Accelerate with only a few flags set."""
def _lowerCAmelCase ( _lowerCAmelCase="no" , _lowerCAmelCase = default_json_config_file , _lowerCAmelCase = False )-> List[Any]:
__UpperCAmelCase = Path(_lowerCAmelCase )
path.parent.mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
if path.exists():
print(
F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
return False
__UpperCAmelCase = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
__UpperCAmelCase = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
__UpperCAmelCase = torch.cuda.device_count()
__UpperCAmelCase = num_gpus
__UpperCAmelCase = False
if num_gpus > 1:
__UpperCAmelCase = 'MULTI_GPU'
else:
__UpperCAmelCase = 'NO'
elif is_xpu_available() and use_xpu:
__UpperCAmelCase = torch.xpu.device_count()
__UpperCAmelCase = num_xpus
__UpperCAmelCase = False
if num_xpus > 1:
__UpperCAmelCase = 'MULTI_XPU'
else:
__UpperCAmelCase = 'NO'
elif is_npu_available():
__UpperCAmelCase = torch.npu.device_count()
__UpperCAmelCase = num_npus
__UpperCAmelCase = False
if num_npus > 1:
__UpperCAmelCase = 'MULTI_NPU'
else:
__UpperCAmelCase = 'NO'
else:
__UpperCAmelCase = 0
__UpperCAmelCase = True
__UpperCAmelCase = 1
__UpperCAmelCase = 'NO'
__UpperCAmelCase = ClusterConfig(**_lowerCAmelCase )
config.to_json_file(_lowerCAmelCase )
return path
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase )-> List[str]:
__UpperCAmelCase = parser.add_parser('default' , parents=_lowerCAmelCase , help=_lowerCAmelCase , formatter_class=_lowerCAmelCase )
parser.add_argument(
'--config_file' , default=_lowerCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=_lowerCAmelCase , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=_lowerCAmelCase )
return parser
def _lowerCAmelCase ( _lowerCAmelCase )-> Union[str, Any]:
__UpperCAmelCase = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F'accelerate configuration saved at {config_file}' )
| 617
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : Optional[Any] = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
a__ : Dict = ['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
a__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 368
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
a__ : int = logging.get_logger(__name__)
class __snake_case ( __magic_name__ ):
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> None:
warnings.warn(
'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PerceiverImageProcessor instead.' , UpperCamelCase_ , )
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
| 368
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__UpperCAmelCase = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 721
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
__UpperCAmelCase = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
__UpperCAmelCase = {
'allenai/led-base-16384': 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
UpperCAmelCase__ : int = bs[:]
UpperCAmelCase__ : Union[str, Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCamelCase )
cs.append(2**8 + n )
n += 1
UpperCAmelCase__ : Tuple = [chr(__UpperCamelCase ) for n in cs]
return dict(zip(__UpperCamelCase , __UpperCamelCase ) )
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : int = set()
UpperCAmelCase__ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase__ : Optional[Any] = char
return pairs
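# Illustrative example (added; not part of the original tokenizer): for the symbol tuple
# ("h", "e", "l", "l", "o") the helper above (get_pairs in the upstream tokenizer) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}, the adjacent pairs that BPE ranks for merging.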
class __lowercase ( __lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Union[str, Any] ,A : Any ,A : Dict ,A : Optional[Any]="replace" ,A : Dict="<s>" ,A : str="</s>" ,A : str="</s>" ,A : Dict="<s>" ,A : List[str]="<unk>" ,A : Union[str, Any]="<pad>" ,A : Any="<mask>" ,A : str=False ,**A : Optional[Any] ,):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else bos_token
UpperCAmelCase__ : Any = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else eos_token
UpperCAmelCase__ : List[str] = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else sep_token
UpperCAmelCase__ : Tuple = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else cls_token
UpperCAmelCase__ : Tuple = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else unk_token
UpperCAmelCase__ : List[str] = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase__ : Union[str, Any] = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token
super().__init__(
errors=A ,bos_token=A ,eos_token=A ,unk_token=A ,sep_token=A ,cls_token=A ,pad_token=A ,mask_token=A ,add_prefix_space=A ,**A ,)
with open(A ,encoding="""utf-8""" ) as vocab_handle:
UpperCAmelCase__ : Tuple = json.load(A )
UpperCAmelCase__ : Any = {v: k for k, v in self.encoder.items()}
UpperCAmelCase__ : List[Any] = errors # how to handle errors in decoding
UpperCAmelCase__ : List[str] = bytes_to_unicode()
UpperCAmelCase__ : int = {v: k for k, v in self.byte_encoder.items()}
with open(A ,encoding="""utf-8""" ) as merges_handle:
UpperCAmelCase__ : List[Any] = merges_handle.read().split("""\n""" )[1:-1]
UpperCAmelCase__ : Tuple = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase__ : Any = dict(zip(A ,range(len(A ) ) ) )
UpperCAmelCase__ : Optional[Any] = {}
UpperCAmelCase__ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase__ : Union[str, Any] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return len(self.encoder )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowercase ( self : Optional[int] ,A : Union[str, Any] ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
UpperCAmelCase__ : Optional[Any] = tuple(A )
UpperCAmelCase__ : int = get_pairs(A )
if not pairs:
return token
while True:
UpperCAmelCase__ : str = min(A ,key=lambda A : self.bpe_ranks.get(A ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = bigram
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : Any = 0
while i < len(A ):
try:
UpperCAmelCase__ : Optional[Any] = word.index(A ,A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase__ : int = j
if word[i] == first and i < len(A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase__ : List[str] = tuple(A )
UpperCAmelCase__ : str = new_word
if len(A ) == 1:
break
else:
UpperCAmelCase__ : str = get_pairs(A )
UpperCAmelCase__ : int = """ """.join(A )
UpperCAmelCase__ : List[str] = word
return word
def __lowercase ( self : Optional[Any] ,A : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = []
for token in re.findall(self.pat ,A ):
UpperCAmelCase__ : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A ).split(""" """ ) )
return bpe_tokens
def __lowercase ( self : Dict ,A : int ):
'''simple docstring'''
return self.encoder.get(A ,self.encoder.get(self.unk_token ) )
def __lowercase ( self : Dict ,A : Optional[Any] ):
'''simple docstring'''
return self.decoder.get(A )
def __lowercase ( self : Optional[Any] ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : int = """""".join(A )
UpperCAmelCase__ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def __lowercase ( self : Optional[int] ,A : str ,A : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(A ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase__ : Any = os.path.join(
A ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase__ : List[str] = os.path.join(
A ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(A ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=A ,ensure_ascii=A ) + """\n""" )
UpperCAmelCase__ : Any = 0
with open(A ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda A : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
UpperCAmelCase__ : Optional[int] = token_index
writer.write(""" """.join(A ) + """\n""" )
index += 1
return vocab_file, merge_file
def __lowercase ( self : Union[str, Any] ,A : List[int] ,A : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ : Dict = [self.cls_token_id]
UpperCAmelCase__ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowercase ( self : int ,A : List[int] ,A : Optional[List[int]] = None ,A : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A ,token_ids_a=A ,already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def __lowercase ( self : Tuple ,A : List[int] ,A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : str = [self.sep_token_id]
UpperCAmelCase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowercase ( self : Any ,A : str ,A : List[Any]=False ,**A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Any = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A ) > 0 and not text[0].isspace()):
UpperCAmelCase__ : Dict = """ """ + text
return (text, kwargs)
def __lowercase ( self : Dict ,A : Union[Dict[str, EncodedInput], BatchEncoding] ,A : Optional[int] = None ,A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,A : Optional[int] = None ,A : Optional[bool] = None ,):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = super()._pad(
encoded_inputs=A ,max_length=A ,padding_strategy=A ,pad_to_multiple_of=A ,return_attention_mask=A ,)
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase__ : str = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase__ : Optional[int] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
UpperCAmelCase__ : Tuple = len(encoded_inputs["""global_attention_mask"""] ) != len(A )
if needs_to_be_padded:
UpperCAmelCase__ : List[Any] = len(A ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase__ : Tuple = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase__ : Dict = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For every vector in value_array, return the closest dataset vector and its distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            # Keep the closest vector seen so far.
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
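    # Minimal usage sketch (assumes the similarity_search / cosine_similarity names defined above):
    _dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    _values = np.array([[0.0, 1.0]])
    print(similarity_search(_dataset, _values))  # expected: [[[0.0, 0.0], 1.0]]
    print(cosine_similarity(np.array([1.0, 1.0]), np.array([1.0, 0.0])))  # expected: ~0.7071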
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = VideoToVideoSDPipeline
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
_lowercase = PipelineTesterMixin.required_optional_params - {"""latents"""}
_lowercase = False
# No `output_type`.
_lowercase = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'),up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'),cross_attention_dim=32,attention_head_dim=4,)
__UpperCamelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,beta_schedule='scaled_linear',clip_sample=A_,set_alpha_to_one=A_,)
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=128,)
torch.manual_seed(0 )
__UpperCamelCase = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act='gelu',projection_dim=512,)
__UpperCamelCase = CLIPTextModel(A_ )
__UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def snake_case_ ( self: Union[str, Any],A_: Any,A_: Any=0 ):
'''simple docstring'''
__UpperCamelCase = floats_tensor((1, 3, 3, 32, 32),rng=random.Random(A_ ) ).to(A_ )
if str(A_ ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(A_ )
else:
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = VideoToVideoSDPipeline(**A_ )
__UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = self.get_dummy_inputs(A_ )
__UpperCamelCase = 'np'
__UpperCamelCase = sd_pipe(**A_ ).frames
__UpperCamelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
__UpperCamelCase = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(),reason='XFormers attention is only available with CUDA and `xformers` installed',)
def snake_case_ ( self: Any ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_,expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
def snake_case_ ( self: Any ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL',torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
__UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 )
__UpperCamelCase = torch.randn((1, 10, 3, 1024, 576),generator=A_ )
__UpperCamelCase = video.to('cuda' )
__UpperCamelCase = 'Spiderman is surfing'
__UpperCamelCase = pipe(A_,video=A_,generator=A_,num_inference_steps=3,output_type='pt' ).frames
__UpperCamelCase = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class UpperCamelCase :
def __init__(self : Any , _A : Tuple , _A : Any=13 , _A : Union[str, Any]=7 , _A : Dict=True , _A : Any=True , _A : str=True , _A : Tuple=True , _A : Dict=99 , _A : int=32 , _A : int=5 , _A : str=4 , _A : Optional[int]=4 , _A : List[str]="gelu" , _A : List[str]=0.0 , _A : str=0.1 , _A : str=True , _A : Any=5_12 , _A : List[str]=16 , _A : List[str]=2 , _A : Tuple=0.02 , _A : Any=3 , _A : List[Any]=4 , _A : int=None , ) -> Optional[Any]:
__snake_case : List[str] = parent
__snake_case : str = batch_size
__snake_case : int = seq_length
__snake_case : Union[str, Any] = is_training
__snake_case : int = use_input_mask
__snake_case : List[str] = use_token_type_ids
__snake_case : Optional[Any] = use_labels
__snake_case : int = vocab_size
__snake_case : str = hidden_size
__snake_case : Optional[int] = num_hidden_layers
__snake_case : List[str] = num_attention_heads
__snake_case : Dict = intermediate_multiple_size
__snake_case : Dict = hidden_act
__snake_case : Union[str, Any] = hidden_dropout
__snake_case : Any = attention_dropout
__snake_case : str = weight_tying
__snake_case : Optional[int] = max_position_embeddings
__snake_case : Optional[int] = type_vocab_size
__snake_case : int = type_sequence_label_size
__snake_case : Tuple = initializer_range
__snake_case : Optional[int] = num_labels
__snake_case : Tuple = num_choices
__snake_case : Union[str, Any] = scope
def _lowercase (self : List[str]) -> Dict:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__snake_case : str = None
if self.use_input_mask:
__snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
__snake_case : Tuple = None
if self.use_labels:
__snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__snake_case : Optional[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowercase (self : Dict) -> int:
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
def _lowercase (self : str) -> Optional[int]:
__snake_case , __snake_case , __snake_case , __snake_case : Dict = self.prepare_config_and_inputs()
__snake_case : List[str] = True
return config, input_ids, input_mask, token_labels
def _lowercase (self : Any , _A : Optional[Any] , _A : Any , _A : List[str]) -> Dict:
__snake_case : Optional[int] = GPTNeoXJapaneseModel(config=_A)
model.to(_A)
model.eval()
__snake_case : Dict = model(_A , attention_mask=_A)
__snake_case : int = model(_A)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowercase (self : int , _A : Optional[Any] , _A : Optional[Any] , _A : Tuple) -> Dict:
__snake_case : int = True
__snake_case : int = GPTNeoXJapaneseModel(_A)
model.to(_A)
model.eval()
__snake_case : Optional[Any] = model(_A , attention_mask=_A)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowercase (self : Union[str, Any] , _A : Any , _A : List[str] , _A : Any , _A : str) -> Optional[Any]:
__snake_case : List[str] = GPTNeoXJapaneseForCausalLM(config=_A)
model.to(_A)
model.eval()
__snake_case : int = model(_A , attention_mask=_A , labels=_A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowercase (self : str , _A : Any , _A : Optional[Any] , _A : Optional[Any]) -> Union[str, Any]:
__snake_case : Tuple = True
__snake_case : Union[str, Any] = GPTNeoXJapaneseForCausalLM(config=_A)
model.to(_A)
model.eval()
# first forward pass
__snake_case : List[Any] = model(_A , attention_mask=_A , use_cache=_A)
__snake_case : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__snake_case : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size)
__snake_case : str = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
__snake_case : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1)
__snake_case : List[Any] = torch.cat([input_mask, next_mask] , dim=-1)
__snake_case : Optional[Any] = model(_A , attention_mask=_A , output_hidden_states=_A)
__snake_case : List[Any] = output_from_no_past['hidden_states'][0]
__snake_case : Any = model(
_A , attention_mask=_A , past_key_values=_A , output_hidden_states=_A , )['hidden_states'][0]
# select random slice
__snake_case : List[Any] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__snake_case : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A , _A , atol=1E-3))
def _lowercase (self : int) -> Any:
__snake_case : List[Any] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : str = config_and_inputs
__snake_case : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( lowercase , lowercase , unittest.TestCase ):
UpperCAmelCase : Dict = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
UpperCAmelCase : Tuple = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
UpperCAmelCase : Any = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
UpperCAmelCase : str = False
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : str = False
UpperCAmelCase : List[str] = False
def _lowercase (self : Any) -> str:
__snake_case : List[Any] = GPTNeoXJapaneseModelTester(self)
__snake_case : Union[str, Any] = ConfigTester(self , config_class=_A , hidden_size=37)
def _lowercase (self : Optional[int]) -> List[Any]:
self.config_tester.run_common_tests()
def _lowercase (self : Optional[Any]) -> List[str]:
__snake_case , __snake_case , __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_A , _A , _A)
def _lowercase (self : Any) -> List[str]:
__snake_case , __snake_case , __snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_A , _A , _A)
def _lowercase (self : Dict) -> Dict:
# This regression test was failing with PyTorch < 1.3
__snake_case , __snake_case , __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
__snake_case : str = None
self.model_tester.create_and_check_model_as_decoder(_A , _A , _A)
def _lowercase (self : Tuple) -> str:
__snake_case , __snake_case , __snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_A , _A , _A)
def _lowercase (self : List[Any]) -> List[Any]:
__snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_A)
@slow
def _lowercase (self : Any) -> Any:
__snake_case : Dict = 'abeja/gpt-neox-japanese-2.7b'
__snake_case : List[Any] = ['データサイエンティストとは、', '100年後に必要とされる会社は、', 'フルリモートの環境で働くために必要なことは、', '国境の長いトンネルを抜けると', '美味しい日本食といえば、']
__snake_case : List[str] = [
'データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。',
'100年後に必要とされる会社は、「人」が中心の会社です。',
'フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。',
'国境の長いトンネルを抜けると、そこは雪国だった。',
'美味しい日本食といえば、やっぱりお寿司ですよね。',
]
__snake_case : int = GPTNeoXJapaneseTokenizer.from_pretrained(_A)
__snake_case : Union[str, Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(_A)
__snake_case : List[str] = []
for prompt in prompts:
__snake_case : Union[str, Any] = tokenizer(_A , return_tensors='pt').input_ids
__snake_case : Tuple = model.generate(_A , max_length=50)
__snake_case : List[str] = tokenizer.batch_decode(_A , skip_special_tokens=_A)
predicted_outputs += generated_string
self.assertListEqual(_A , _A)
"""simple docstring"""
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily make change for `value`, trying the denominations from the end of the list first."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations, largest first (assumes ascending input order)
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer
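# Example (a sketch): find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
# returns [500, 100, 100, 100, 100, 50, 20, 10, 5, 2].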
# Driver Code
if __name__ == "__main__":
_a : Optional[int]= []
_a : Optional[int]= "0"
if (
input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
== "y"
):
_a : int= int(input("Enter the number of denominations you want to add: ").strip())
for i in range(0, n):
denominations.append(int(input(f'''Denomination {i}: ''').strip()))
_a : Optional[int]= input("Enter the change you want to make in Indian Currency: ").strip()
else:
# All denominations of Indian Currency if user does not enter
_a : Tuple= [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
_a : List[str]= input("Enter the change you want to make: ").strip()
if int(value) == 0 or int(value) < 0:
print("The total value cannot be zero or negative.")
else:
print(f'''Following is minimal change for {value}: ''')
_a : List[Any]= find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=" ")
from math import pi
def arc_length(angle: float, radius: float) -> float:
    """Length of a circular arc with central angle `angle` (in degrees) and radius `radius`."""
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
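    # Sanity check (a sketch): a 90-degree arc of a radius-10 circle is a quarter of the
    # circumference, i.e. (2 * pi * 10) / 4 ≈ 15.7079, which is what arc_length(90, 10) prints.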
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
A__ : Any = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
A__ : Tuple = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: Any = calculate_rouge(_UpperCamelCase , _UpperCamelCase , bootstrap_aggregation=_UpperCamelCase , rouge_keys=['''rouge2''', '''rougeL'''] )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
_lowercase: List[Any] = calculate_rouge(_UpperCamelCase , _UpperCamelCase , bootstrap_aggregation=_UpperCamelCase , rouge_keys=['''rouge2'''] )
assert (
pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean()
)
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: Dict = '''rougeLsum'''
_lowercase: Dict = calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase , rouge_keys=[k] )[k]
_lowercase: List[str] = calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase , rouge_keys=[k] )[k]
assert score > score_no_sep
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: Tuple = ['''rouge1''', '''rouge2''', '''rougeL''']
_lowercase: Dict = calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase , rouge_keys=_UpperCamelCase )
_lowercase: Optional[int] = calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase , rouge_keys=_UpperCamelCase )
assert score_sep == score_no_sep
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: Union[str, Any] = [
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
]
_lowercase: Union[str, Any] = [
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
assert calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase ) == calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: int = [
'''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
]
_lowercase: Union[str, Any] = [
''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
]
_lowercase: List[Any] = calculate_rouge(_UpperCamelCase , _UpperCamelCase , rouge_keys=['''rougeLsum'''] , newline_sep=_UpperCamelCase )['''rougeLsum''']
_lowercase: Union[str, Any] = calculate_rouge(_UpperCamelCase , _UpperCamelCase , rouge_keys=['''rougeLsum'''] )['''rougeLsum''']
assert new_score > prev_score
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: List[str] = Path('''examples/seq2seq/test_data/wmt_en_ro''' )
_lowercase: int = calculate_rouge_path(data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
_lowercase: Optional[int] = calculate_rouge_path(
data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) , bootstrap_aggregation=_UpperCamelCase )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
'''simple docstring'''
def encrypt(input_string: str, key: int) -> str:
    """Shuffle the characters of `input_string` along a zig-zag grid of height `key` (rail fence cipher)."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Rebuild the zig-zag grid from the ciphertext and read the characters back in their original order."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Try every grid height from 1 to len(input_string) - 1 and return all decryptions."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
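    # Quick round-trip sanity check (a sketch; assumes the encrypt/decrypt names defined above):
    _message = "WEAREDISCOVEREDFLEEATONCE"
    print(decrypt(encrypt(_message, 4), 4) == _message)  # expected: True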
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCamelCase__ : Tuple = logging.get_logger(__name__)
UpperCamelCase__ : List[str] = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A : Union[str, Any] = '''t5'''
_A : Optional[Any] = ['''past_key_values''']
_A : Any = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : Union[str, Any] , lowerCAmelCase__ : List[str]=3_2_1_2_8 , lowerCAmelCase__ : Dict=5_1_2 , lowerCAmelCase__ : str=6_4 , lowerCAmelCase__ : Tuple=2_0_4_8 , lowerCAmelCase__ : Optional[Any]=6 , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Any=8 , lowerCAmelCase__ : Dict=3_2 , lowerCAmelCase__ : Dict=1_2_8 , lowerCAmelCase__ : Any=0.1 , lowerCAmelCase__ : str=1E-6 , lowerCAmelCase__ : str=1.0 , lowerCAmelCase__ : Optional[Any]="relu" , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Optional[Any]=0 , lowerCAmelCase__ : List[Any]=1 , **lowerCAmelCase__ : Dict , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = vocab_size
__SCREAMING_SNAKE_CASE : str = d_model
__SCREAMING_SNAKE_CASE : str = d_kv
__SCREAMING_SNAKE_CASE : Optional[Any] = d_ff
__SCREAMING_SNAKE_CASE : Optional[Any] = num_layers
__SCREAMING_SNAKE_CASE : List[str] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__SCREAMING_SNAKE_CASE : Optional[int] = num_heads
__SCREAMING_SNAKE_CASE : Any = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE : int = relative_attention_max_distance
__SCREAMING_SNAKE_CASE : Optional[int] = dropout_rate
__SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_epsilon
__SCREAMING_SNAKE_CASE : Optional[int] = initializer_factor
__SCREAMING_SNAKE_CASE : Any = feed_forward_proj
__SCREAMING_SNAKE_CASE : str = use_cache
__SCREAMING_SNAKE_CASE : List[str] = self.feed_forward_proj.split("""-""" )
__SCREAMING_SNAKE_CASE : Tuple = act_info[-1]
__SCREAMING_SNAKE_CASE : int = act_info[0] == """gated"""
if len(lowerCAmelCase__ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase__ ) > 2:
raise ValueError(
F"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__SCREAMING_SNAKE_CASE : List[Any] = """gelu_new"""
super().__init__(
pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ , )
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
@property
def UpperCamelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = {
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
__SCREAMING_SNAKE_CASE : Optional[Any] = """past_encoder_sequence + sequence"""
__SCREAMING_SNAKE_CASE : Optional[Any] = {0: """batch"""}
__SCREAMING_SNAKE_CASE : Optional[int] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
__SCREAMING_SNAKE_CASE : List[Any] = {0: """batch""", 1: """decoder_sequence"""}
__SCREAMING_SNAKE_CASE : Optional[Any] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase__ , direction="""inputs""" )
return common_inputs
@property
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
return 1_3
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    """Check is_small_dataset against the configured IN_MEMORY_MAX_SIZE."""
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
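# Rough sketch of the behavior this test encodes (derived from the asserts above, not the
# library source): is_small_dataset(size) should return True only when both `size` and
# datasets.config.IN_MEMORY_MAX_SIZE are non-zero and size < IN_MEMORY_MAX_SIZE, else False.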
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
a_ = logging.get_logger(__name__)
class UpperCAmelCase__ ( snake_case ):
"""simple docstring"""
def __init__( self: Tuple , *__lowerCAmelCase: str , **__lowerCAmelCase: Optional[Any] ) -> None:
'''simple docstring'''
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead." , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
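# Migration note (an assumption based on the deprecation warning above): new code should use
# VideoMAEImageProcessor, e.g. VideoMAEImageProcessor.from_pretrained(checkpoint), instead of
# this deprecated VideoMAEFeatureExtractor wrapper.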
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = IFPipeline
UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __A ( self : Tuple ):
'''simple docstring'''
return self._get_dummy_components()
def __A ( self : int , UpperCamelCase__ : Dict , UpperCamelCase__ : int=0 ):
'''simple docstring'''
if str(UpperCamelCase__ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __A ( self : List[str] ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __A ( self : Any ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __A ( self : List[Any] ):
'''simple docstring'''
self._test_save_load_local()
def __A ( self : List[str] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __A ( self : Tuple ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : str = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''' )
SCREAMING_SNAKE_CASE : Dict = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE : Tuple = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : int = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def A ( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float tensor of the given 2-D shape as nested python lists."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
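# Example (a sketch): floats_list((2, 3)) returns a 2x3 nested list of floats drawn from [0, 1).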
class lowercase__ ( unittest.TestCase):
def __init__( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str=7 , UpperCamelCase__ : Any=400 , UpperCamelCase__ : List[str]=2000 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : str=30 , UpperCamelCase__ : Tuple=4_4100 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : str = min_seq_length
SCREAMING_SNAKE_CASE : Dict = max_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE : Optional[Any] = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[int] = feature_size
SCREAMING_SNAKE_CASE : Tuple = num_audio_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = chunk_length
SCREAMING_SNAKE_CASE : str = sampling_rate
def __A ( self : Optional[Any] ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __A ( self : Tuple , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
def _flatten(UpperCamelCase__ : str ):
return list(itertools.chain(*UpperCamelCase__ ) )
if equal_length:
SCREAMING_SNAKE_CASE : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE : Optional[Any] = [np.asarray(UpperCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = TvltFeatureExtractor
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = TvltFeatureExtractionTester(self )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(UpperCamelCase__ , '''spectrogram_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''feature_size''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''num_audio_channels''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''hop_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''chunk_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''sampling_rate''' ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Optional[int] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Tuple = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : int = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Any = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(
UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 , mask_audio=UpperCamelCase__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE : Dict = np.asarray(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __A ( self : Optional[int] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE : Dict = ds.sort('''id''' ).select(range(UpperCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE : int = TvltFeatureExtractor()
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''pt''' ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
SCREAMING_SNAKE_CASE : str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , UpperCamelCase__ , atol=1E-4 ) )
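# Illustrative usage sketch: a minimal, hedged example of the call pattern the integration test
# above exercises. It reuses the TvltFeatureExtractor and numpy imports of this module; the
# one-second random waveform and the 44_100 Hz sampling rate are arbitrary illustrative choices.
if __name__ == "__main__":
    example_extractor = TvltFeatureExtractor()
    example_waveform = np.random.randn(44_100).astype(np.float32)  # one second of noise at 44.1 kHz
    example_features = example_extractor(example_waveform, sampling_rate=44_100, return_tensors="np")
    # audio_values is laid out as (batch, num_channels, time_frames, num_mel_bins)
    print(example_features.audio_values.shape)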
| 34
| 0
|
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowerCAmelCase_ ( unittest.TestCase ):
UpperCAmelCase__ : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
UpperCAmelCase__ : List[Any] = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase : Tuple = AudioClassificationPipeline(model=__lowerCamelCase, feature_extractor=__lowerCamelCase )
# test with a raw waveform
UpperCamelCase : str = np.zeros((3_4000,) )
UpperCamelCase : int = np.zeros((1_4000,) )
return audio_classifier, [audioa, audio]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase : Optional[int] = examples
UpperCamelCase : Optional[Any] = audio_classifier(__lowerCamelCase )
# by default a model is initialized with num_labels=2
self.assertEqual(
__lowerCamelCase, [
{'score': ANY(__lowerCamelCase ), 'label': ANY(__lowerCamelCase )},
{'score': ANY(__lowerCamelCase ), 'label': ANY(__lowerCamelCase )},
], )
UpperCamelCase : Union[str, Any] = audio_classifier(__lowerCamelCase, top_k=1 )
self.assertEqual(
__lowerCamelCase, [
{'score': ANY(__lowerCamelCase ), 'label': ANY(__lowerCamelCase )},
], )
self.run_torchaudio(__lowerCamelCase )
@require_torchaudio
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[str]:
import datasets
# test with a local file
UpperCamelCase : Union[str, Any] = datasets.load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation' )
UpperCamelCase : Dict = dataset[0]['''audio''']['''array''']
UpperCamelCase : List[str] = audio_classifier(__lowerCamelCase )
self.assertEqual(
__lowerCamelCase, [
{'score': ANY(__lowerCamelCase ), 'label': ANY(__lowerCamelCase )},
{'score': ANY(__lowerCamelCase ), 'label': ANY(__lowerCamelCase )},
], )
@require_torch
def snake_case_ ( self ) -> Dict:
UpperCamelCase : Tuple = '''anton-l/wav2vec2-random-tiny-classifier'''
UpperCamelCase : List[str] = pipeline('audio-classification', model=__lowerCamelCase )
UpperCamelCase : Tuple = np.ones((8000,) )
UpperCamelCase : int = audio_classifier(__lowerCamelCase, top_k=4 )
UpperCamelCase : Union[str, Any] = [
{'''score''': 0.08_42, '''label''': '''no'''},
{'''score''': 0.08_38, '''label''': '''up'''},
{'''score''': 0.08_37, '''label''': '''go'''},
{'''score''': 0.08_34, '''label''': '''right'''},
]
UpperCamelCase : Optional[int] = [
{'''score''': 0.08_45, '''label''': '''stop'''},
{'''score''': 0.08_44, '''label''': '''on'''},
{'''score''': 0.08_41, '''label''': '''right'''},
{'''score''': 0.08_34, '''label''': '''left'''},
]
self.assertIn(nested_simplify(__lowerCamelCase, decimals=4 ), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
UpperCamelCase : str = {'''array''': np.ones((8000,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
UpperCamelCase : Dict = audio_classifier(__lowerCamelCase, top_k=4 )
self.assertIn(nested_simplify(__lowerCamelCase, decimals=4 ), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def snake_case_ ( self ) -> Optional[int]:
import datasets
UpperCamelCase : int = '''superb/wav2vec2-base-superb-ks'''
UpperCamelCase : Dict = pipeline('audio-classification', model=__lowerCamelCase )
UpperCamelCase : Optional[int] = datasets.load_dataset('anton-l/superb_dummy', 'ks', split='test' )
UpperCamelCase : Union[str, Any] = np.array(dataset[3]['speech'], dtype=np.floataa )
UpperCamelCase : Union[str, Any] = audio_classifier(__lowerCamelCase, top_k=4 )
self.assertEqual(
nested_simplify(__lowerCamelCase, decimals=3 ), [
{'score': 0.9_81, 'label': 'go'},
{'score': 0.0_07, 'label': 'up'},
{'score': 0.0_06, 'label': '_unknown_'},
{'score': 0.0_01, 'label': 'down'},
], )
@require_tf
@unittest.skip('Audio classification is not implemented for TF' )
def snake_case_ ( self ) -> Union[str, Any]:
pass
| 40
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase__( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : List[Any] = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def UpperCamelCase__( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : str = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def UpperCamelCase__( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : int = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
__A : int = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def UpperCamelCase__( self ):
'''simple docstring'''
__A : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__A : Union[str, Any] = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
__A : List[Any] = DDPMScheduler()
__A : Optional[Any] = AudioDiffusionPipeline(vqvae=__lowerCamelCase , unet=self.dummy_unet , mel=__lowerCamelCase , scheduler=__lowerCamelCase )
__A : Tuple = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : Tuple = torch.Generator(device=__lowerCamelCase ).manual_seed(42 )
__A : List[str] = pipe(generator=__lowerCamelCase , steps=4 )
__A : Union[str, Any] = output.audios[0]
__A : str = output.images[0]
__A : List[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(42 )
__A : Any = pipe(generator=__lowerCamelCase , steps=4 , return_dict=__lowerCamelCase )
__A : int = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
__A : Tuple = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
__A : str = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
__A : Any = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
__A : int = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
__A : Any = DDIMScheduler()
__A : Optional[Any] = self.dummy_vqvae_and_unet
__A : int = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__lowerCamelCase , scheduler=__lowerCamelCase )
__A : Any = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
np.random.seed(0 )
__A : str = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
__A : Union[str, Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(42 )
__A : int = pipe(raw_audio=__lowerCamelCase , generator=__lowerCamelCase , start_step=5 , steps=10 )
__A : Any = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
__A : str = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
__A : Optional[Any] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
__A : Union[str, Any] = self.dummy_unet_condition
__A : List[str] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=__lowerCamelCase , mel=__lowerCamelCase , scheduler=__lowerCamelCase )
__A : Dict = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
np.random.seed(0 )
__A : int = torch.rand((1, 1, 10) )
__A : Optional[Any] = pipe(generator=__lowerCamelCase , encoding=__lowerCamelCase )
__A : str = output.images[0]
__A : Optional[Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
__A : Tuple = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__( self ):
'''simple docstring'''
__A : int = torch_device
__A : List[Any] = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
__A : List[Any] = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : Dict = torch.Generator(device=__lowerCamelCase ).manual_seed(42 )
__A : str = pipe(generator=__lowerCamelCase )
__A : Dict = output.audios[0]
__A : List[Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
__A : Optional[int] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
__A : Optional[Any] = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 177
| 0
|
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def a__ ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : int ):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple=True ):
'''simple docstring'''
model.train()
UpperCAmelCase__ : int = model(_lowerCamelCase )
UpperCAmelCase__ : Dict = F.mse_loss(_lowerCamelCase , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_lowerCamelCase )
def a__ ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int]=False ):
'''simple docstring'''
set_seed(42 )
UpperCAmelCase__ : str = RegressionModel()
UpperCAmelCase__ : Dict = deepcopy(_lowerCamelCase )
UpperCAmelCase__ : List[Any] = RegressionDataset(length=80 )
UpperCAmelCase__ : Tuple = DataLoader(_lowerCamelCase , batch_size=16 )
model.to(accelerator.device )
if sched:
UpperCAmelCase__ : Any = AdamW(params=model.parameters() , lr=1E-3 )
UpperCAmelCase__ : Tuple = AdamW(params=ddp_model.parameters() , lr=1E-3 )
UpperCAmelCase__ : Any = LambdaLR(_lowerCamelCase , lr_lambda=lambda lowerCAmelCase : epoch**0.65 )
UpperCAmelCase__ : Optional[Any] = LambdaLR(_lowerCamelCase , lr_lambda=lambda lowerCAmelCase : epoch**0.65 )
# Make a copy of `model`
if sched:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def a__ ( lowerCAmelCase : Any ):
'''simple docstring'''
# Test when on a single CPU or GPU that the context manager does nothing
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = get_training_setup(_lowerCamelCase )
# Use a single batch
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = next(iter(_lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase__ , UpperCAmelCase__ : str = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase ):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCAmelCase__ : Dict = ddp_input[torch.randperm(len(_lowerCamelCase ) )]
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
# Test on distributed setup that context manager behaves properly
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = get_training_setup(_lowerCamelCase )
# Use a single batch
UpperCAmelCase__ , UpperCAmelCase__ : Any = next(iter(_lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase ):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCAmelCase__ : Union[str, Any] = ddp_input[torch.randperm(len(_lowerCamelCase ) )]
def a__ ( lowerCAmelCase : int=False , lowerCAmelCase : Optional[int]=False ):
'''simple docstring'''
UpperCAmelCase__ : Dict = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = get_training_setup(_lowerCamelCase )
for iteration, batch in enumerate(_lowerCamelCase ):
UpperCAmelCase__ , UpperCAmelCase__ : int = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_lowerCamelCase ):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_lowerCamelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCAmelCase__ : Dict = ddp_input[torch.randperm(len(_lowerCamelCase ) )]
GradientState._reset_state()
def a__ ( lowerCAmelCase : Tuple=False , lowerCAmelCase : Any=False ):
'''simple docstring'''
UpperCAmelCase__ : Any = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = get_training_setup(_lowerCamelCase , _lowerCamelCase )
for iteration, batch in enumerate(_lowerCamelCase ):
UpperCAmelCase__ , UpperCAmelCase__ : Dict = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase__ , UpperCAmelCase__ : Dict = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase__ , UpperCAmelCase__ : int = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_lowerCamelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_lowerCamelCase ):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
UpperCAmelCase__ : str = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_lowerCamelCase ))
if accelerator.num_processes > 1:
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = Accelerator()
UpperCAmelCase__ : Optional[int] = RegressionDataset(length=80 )
UpperCAmelCase__ : int = DataLoader(_lowerCamelCase , batch_size=16 )
UpperCAmelCase__ : str = RegressionDataset(length=96 )
UpperCAmelCase__ : str = DataLoader(_lowerCamelCase , batch_size=16 )
UpperCAmelCase__ , UpperCAmelCase__ : Dict = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_lowerCamelCase )
if iteration < len(_lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_lowerCamelCase )
if batch_num < len(_lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = Accelerator()
UpperCAmelCase__ : Union[str, Any] = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(_lowerCamelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(_lowerCamelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
test_gradient_accumulation(_lowerCamelCase , _lowerCamelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
test_gradient_accumulation_with_opt_and_scheduler(_lowerCamelCase , _lowerCamelCase )
def a__ ( lowerCAmelCase : str ):
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 700
|
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm( number : int ):
    '''simple docstring'''
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator( number : int ):
    '''simple docstring'''
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark( ):
    '''simple docstring'''
    def do_benchmark(number : int ) -> None:
        setup = "import __main__ as z"
        print(F"Benchmark when {number = }:" )
        print(F"{get_set_bits_count_using_modulo_operator(number ) = }" )
        timing = timeit(F"z.get_set_bits_count_using_modulo_operator({number})" , setup=setup )
        print(F"timeit() runs in {timing} seconds" )
        print(F"{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }" )
        timing = timeit(
            F"z.get_set_bits_count_using_brian_kernighans_algorithm({number})" , setup=setup , )
        print(F"timeit() runs in {timing} seconds" )
    for number in (25, 37, 58, 0):
        do_benchmark(number )
        print()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
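    # Illustrative sanity check: a small, hedged demonstration that the two counting strategies
    # defined above agree; the sample values below are arbitrary.
    for sample in (0, 1, 25, 37, 58, 2**20 - 1):
        assert get_set_bits_count_using_brian_kernighans_algorithm(sample ) == get_set_bits_count_using_modulo_operator(sample )
        print(F"{sample} = {bin(sample)} -> {get_set_bits_count_using_modulo_operator(sample )} set bits" )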
| 660
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig ( PretrainedConfig ):
    model_type = "bert-generation"
    def __init__( self , vocab_size=5_0358 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=2 , eos_token_id=1 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
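# Illustrative usage sketch: a minimal, hedged example of instantiating the configuration defined
# above. The overridden sizes are arbitrary; JSON serialization comes from the PretrainedConfig
# base class.
if __name__ == "__main__":
    example_config = BertGenerationConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
    print(example_config.hidden_size, example_config.num_hidden_layers)
    print(example_config.to_json_string()[:120])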
| 466
|
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock ( tmpdir ):
    locka = FileLock(str(tmpdir / '''foo.lock''' ) )
    lockb = FileLock(str(tmpdir / '''foo.lock''' ) )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
            assert time.time() - _start > timeout
def test_long_path ( tmpdir ):
    filename = '''a''' * 10_00 + '''.lock'''
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith('''.lock''' )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 2_55
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
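# Illustrative usage sketch (a minimal, hedged example of the FileLock API exercised above):
# acquire the lock through a context manager and rely on Timeout when it cannot be obtained.
# The temporary directory is created here only to keep the sketch self-contained.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        example_lock = FileLock(os.path.join(tmp_dir, "example.lock"))
        with example_lock.acquire(timeout=0.01):
            print("holding", example_lock._lock_file)
        print("released")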
| 466
| 1
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , lowerCamelCase = False ):
snake_case__ = scheduler
snake_case__ = optimizers if isinstance(lowerCamelCase , (list, tuple) ) else [optimizers]
snake_case__ = split_batches
snake_case__ = step_with_optimizer
snake_case__ = GradientState()
def A_ ( self , *lowerCamelCase , **lowerCamelCase ):
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*lowerCamelCase , **lowerCamelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*lowerCamelCase , **lowerCamelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
snake_case__ = AcceleratorState().num_processes
for _ in range(lowerCamelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , "total_steps" ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*lowerCamelCase , **lowerCamelCase )
else:
self.scheduler.step(*lowerCamelCase , **lowerCamelCase )
def A_ ( self ):
return self.scheduler.get_last_lr()
def A_ ( self ):
return self.scheduler.state_dict()
def A_ ( self , lowerCamelCase ):
self.scheduler.load_state_dict(lowerCamelCase )
def A_ ( self ):
return self.scheduler.get_lr()
def A_ ( self , *lowerCamelCase , **lowerCamelCase ):
return self.scheduler.print_lr(*lowerCamelCase , **lowerCamelCase )
| 711
|
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def gabor_filter_kernel( ksize , sigma , theta , lambd , gamma , psi ):
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float32 )
    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__magic_name__ = imread('''../image_data/lena.jpg''')
# turn image in gray scale value
__magic_name__ = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__magic_name__ = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
__magic_name__ = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__magic_name__ = out / out.max() * 255
__magic_name__ = out.astype(np.uinta)
imshow('''Original''', gray)
imshow('''Gabor filter with 20x20 mask and 6 directions''', out)
waitKey(0)
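    # Illustrative sanity check of gabor_filter_kernel using only numpy: an even ksize is bumped
    # to the next odd value, and the kernel peaks at its centre where the Gaussian envelope and
    # the cosine carrier both equal 1. The parameter values mirror the loop above.
    example_kernel = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
    print(example_kernel.shape)  # (11, 11): the even size 10 is bumped to 11
    print(float(example_kernel[5, 5]))  # 1.0 at the kernel centre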
| 530
| 0
|
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
SCREAMING_SNAKE_CASE : List[str] = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
SCREAMING_SNAKE_CASE : List[Any] = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
raise Exception("requires fairseq >= 0.9.0")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Dict = " Hello world! cécé herlolip"
SCREAMING_SNAKE_CASE : Optional[int] = [
("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def lowerCamelCase_ ( __UpperCamelCase ):
A_ = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
]
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
A_ = dct.pop(__UpperCAmelCase )
A_ = val
def lowerCamelCase_ ( __UpperCamelCase ):
A_ = torch.load(__UpperCAmelCase , map_location='''cpu''' )
A_ = torch.hub.load('''pytorch/fairseq''' , '''bart.large.cnn''' ).eval()
hub_interface.model.load_state_dict(sd['''model'''] )
return hub_interface
def lowerCamelCase_ ( __UpperCamelCase ):
A_ = emb.weight.shape
A_ = nn.Linear(__UpperCAmelCase , __UpperCAmelCase , bias=__UpperCAmelCase )
A_ = emb.weight.data
return lin_layer
@torch.no_grad()
def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None ):
if not os.path.exists(__UpperCAmelCase ):
A_ = torch.hub.load('''pytorch/fairseq''' , __UpperCAmelCase ).eval()
else:
A_ = load_xsum_checkpoint(__UpperCAmelCase )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
A_ = checkpoint_path.replace('''.''' , '''-''' )
A_ = BartConfig.from_pretrained(__UpperCAmelCase )
A_ = bart.encode(__UpperCAmelCase ).unsqueeze(0 )
A_ = BartTokenizer.from_pretrained(__UpperCAmelCase ).encode(__UpperCAmelCase , return_tensors='''pt''' ).unsqueeze(0 )
if not torch.eq(__UpperCAmelCase , __UpperCAmelCase ).all():
raise ValueError(
F"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}" )
if checkpoint_path == "bart.large.mnli":
A_ = bart.state_dict()
remove_ignore_keys_(__UpperCAmelCase )
A_ = state_dict['model.decoder.embed_tokens.weight']
for src, dest in mnli_rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
A_ = BartForSequenceClassification(__UpperCAmelCase ).eval()
model.load_state_dict(__UpperCAmelCase )
A_ = bart.predict('''mnli''' , __UpperCAmelCase , return_logits=__UpperCAmelCase )
A_ = model(__UpperCAmelCase )[0] # logits
else: # no classification heads to worry about
A_ = bart.model.state_dict()
remove_ignore_keys_(__UpperCAmelCase )
A_ = state_dict['decoder.embed_tokens.weight']
A_ = bart.extract_features(__UpperCAmelCase )
if hf_checkpoint_name == "facebook/bart-large":
A_ = BartModel(__UpperCAmelCase ).eval()
model.load_state_dict(__UpperCAmelCase )
A_ = model(__UpperCAmelCase ).model[0]
else:
A_ = BartForConditionalGeneration(__UpperCAmelCase ).eval() # an existing summarization ckpt
model.model.load_state_dict(__UpperCAmelCase )
if hasattr(__UpperCAmelCase , '''lm_head''' ):
A_ = make_linear_from_emb(model.model.shared )
A_ = model.model(__UpperCAmelCase )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
F"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''' )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
model.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
)
SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 141
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__magic_name__ = '''bart'''
__magic_name__ = True
@st.cache(allow_output_mutation=__UpperCAmelCase )
def UpperCAmelCase__( ):
if LOAD_DENSE_INDEX:
__snake_case : str = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
__snake_case : List[Any] = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
__snake_case : Any = qar_model.eval()
else:
__snake_case , __snake_case : Optional[int] = (None, None)
if MODEL_TYPE == "bart":
__snake_case : Tuple = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
__snake_case : List[str] = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
__snake_case : List[Any] = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
__snake_case : List[str] = sas_model.eval()
else:
__snake_case , __snake_case : Union[str, Any] = make_qa_sas_model(
model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=__UpperCAmelCase )
def UpperCAmelCase__( ):
if LOAD_DENSE_INDEX:
__snake_case : List[Any] = faiss.StandardGpuResources()
__snake_case : List[Any] = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
__snake_case : Dict = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 1_28) , )
__snake_case : List[str] = faiss.IndexFlatIP(1_28 )
__snake_case : int = faiss.index_cpu_to_gpu(__UpperCAmelCase , 1 , __UpperCAmelCase )
wikiaab_gpu_index_flat.add(__UpperCAmelCase ) # TODO fix for larger GPU
else:
__snake_case , __snake_case : Tuple = (None, None)
__snake_case : List[str] = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=__UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : str = datasets.load_dataset('eli5' , name='LFQA_reddit' )
__snake_case : List[Any] = elia['train_eli5']
__snake_case : Dict = np.memmap(
'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 1_28) )
__snake_case : str = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(__UpperCAmelCase )
return (elia_train, eli5_train_q_index)
__magic_name__ , __magic_name__ , __magic_name__ = load_indexes()
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = load_models()
__magic_name__ , __magic_name__ = load_train_data()
def find_nearest_training( question , n_results=10 ):
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    D, I = eli5_train_q_index.search(q_rep , n_results )
    nn_examples = [elia_train[int(i )] for i in I[0]]
    return nn_examples
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : int="wiki40b" , __UpperCAmelCase : List[str]="dense" , __UpperCAmelCase : Any=10 ):
if source == "none":
__snake_case , __snake_case : Union[str, Any] = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__snake_case , __snake_case : int = query_qa_dense_index(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
__snake_case , __snake_case : Dict = query_es_index(
__UpperCAmelCase , __UpperCAmelCase , index_name='english_wiki40b_snippets_100w' , n_results=__UpperCAmelCase , )
__snake_case : Union[str, Any] = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
__snake_case : Any = 'question: {} context: {}'.format(__UpperCAmelCase , __UpperCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda __UpperCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __UpperCAmelCase : None),
} )
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] , __UpperCAmelCase : str=64 , __UpperCAmelCase : int=2_56 , __UpperCAmelCase : int=False , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Union[str, Any]=0.95 , __UpperCAmelCase : Tuple=0.8 ):
with torch.no_grad():
__snake_case : Union[str, Any] = qa_sas_generate(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , num_answers=1 , num_beams=__UpperCAmelCase , min_len=__UpperCAmelCase , max_len=__UpperCAmelCase , do_sample=__UpperCAmelCase , temp=__UpperCAmelCase , top_p=__UpperCAmelCase , top_k=__UpperCAmelCase , max_input_length=10_24 , device='cuda:0' , )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
__magic_name__ = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
__magic_name__ = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__magic_name__ = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
__magic_name__ = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
__magic_name__ = st.sidebar.checkbox('''Demo options''')
if demo_options:
__magic_name__ = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
__magic_name__ = action_list.index(action_st)
__magic_name__ = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
__magic_name__ = show_type == '''Show full text of passages'''
else:
__magic_name__ = 3
__magic_name__ = True
__magic_name__ = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
__magic_name__ = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
__magic_name__ = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
__magic_name__ = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
__magic_name__ = '''wiki40b'''
__magic_name__ = '''dense'''
__magic_name__ = '''beam'''
__magic_name__ = 2
__magic_name__ = 64
__magic_name__ = 256
__magic_name__ = None
__magic_name__ = None
__magic_name__ = st.sidebar.checkbox('''Generation options''')
if generate_options:
__magic_name__ = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
__magic_name__ = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
__magic_name__ = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__magic_name__ = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__magic_name__ = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__magic_name__ = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__magic_name__ = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__magic_name__ = None
# start main text
__magic_name__ = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
__magic_name__ = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__magic_name__ = st.text_input('''Enter your question here:''', '''''')
else:
__magic_name__ = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
__magic_name__ , __magic_name__ = make_support(question, source=wiki_source, method='''dense''', n_results=10)
__magic_name__ , __magic_name__ = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
__magic_name__ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__magic_name__ = support_list[:10]
__magic_name__ = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
__magic_name__ , __magic_name__ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__magic_name__ , __magic_name__ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
__magic_name__ = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
__magic_name__ = res[1].strip()
if sec_titles == "":
__magic_name__ = '''[{}]({})'''.format(res[0], wiki_url)
else:
__magic_name__ = sec_titles.split(''' & ''')
__magic_name__ = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
__magic_name__ = find_nearest_training(question)
__magic_name__ = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
__magic_name__ = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
__magic_name__ = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 576
| 0
|
import requests
__snake_case = """YOUR API KEY"""
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = giphy_api_key ) ->list:
lowercase_ = """+""".join(query.split() )
lowercase_ = f"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
lowercase_ = requests.get(SCREAMING_SNAKE_CASE_ ).json()["""data"""]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("""\n""".join(get_gifs("""space ship""")))
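    # Illustrative, offline sketch of the request URL that get_gifs builds; no network call is
    # made here and the placeholder API key above is left untouched.
    example_query = "space ship"
    example_formatted = "+".join(example_query.split())
    print(f"""https://api.giphy.com/v1/gifs/search?q={example_formatted}&api_key={giphy_api_key}""")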
| 712
|
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
__snake_case = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def A_ ( SCREAMING_SNAKE_CASE_ ) ->List[Any]:
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
lowercase_ = k.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if k.startswith("""encoder""" ):
lowercase_ = k.replace(""".attn""" , """.self_attn""" )
lowercase_ = k.replace("""norm1""" , """self_attn_layer_norm""" )
lowercase_ = k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
lowercase_ = k.replace("""norm1""" , """self_attn_layer_norm""" )
lowercase_ = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
lowercase_ = k.replace("""norm3""" , """final_layer_norm""" )
return k
def A_ ( SCREAMING_SNAKE_CASE_ ) ->List[str]:
lowercase_ = [
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
lowercase_ = sd.pop(SCREAMING_SNAKE_CASE_ )
lowercase_ = k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
lowercase_ = v
__snake_case = ["""START"""]
@torch.no_grad()
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Optional[Any]:
lowercase_ = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )
lowercase_ = model["""model"""]
lowercase_ = BlenderbotConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
lowercase_ = BlenderbotForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
lowercase_ = m.model.state_dict().keys()
lowercase_ = []
lowercase_ = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
lowercase_ = rename_state_dict_key(SCREAMING_SNAKE_CASE_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
lowercase_ = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(SCREAMING_SNAKE_CASE_ )
m.model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
m.half()
m.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
__snake_case = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 603
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__a ):
__UpperCamelCase : List[str] = ["""onnx"""]
def __init__( self :Dict , *SCREAMING_SNAKE_CASE :Any , **SCREAMING_SNAKE_CASE :List[str] ) -> Any:
'''simple docstring'''
requires_backends(self , ["""onnx"""] )
@classmethod
def __UpperCAmelCase ( cls :Union[str, Any] , *SCREAMING_SNAKE_CASE :Tuple , **SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""onnx"""] )
@classmethod
def __UpperCAmelCase ( cls :Dict , *SCREAMING_SNAKE_CASE :Tuple , **SCREAMING_SNAKE_CASE :Any ) -> str:
'''simple docstring'''
requires_backends(cls , ["""onnx"""] )
| 694
|
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor ( SequenceFeatureExtractor ):
    model_input_names = ["""audio_values""", """audio_mask"""]
    def __init__(self , spectrogram_length=2_0_4_8 , num_channels=1 , patch_size=[1_6, 1_6] , feature_size=1_2_8 , sampling_rate=4_4_1_0_0 , hop_length_to_sampling_rate=8_6 , n_fft=2_0_4_8 , padding_value=0.0 , **kwargs , ):
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , ).T
    def _np_extract_fbank_features(self , waveform ):
        # log-mel spectrogram in dB, rescaled to roughly the [-1, 1] range expected by the model
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=8_0.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 2_0.0
        log_spec = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
def __call__(self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = False , **lowerCAmelCase__ , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
F" with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
_UpperCAmelCase : List[str] = isinstance(lowerCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
_UpperCAmelCase : Tuple = is_batched_numpy or (
isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_UpperCAmelCase : Dict = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase__ , np.ndarray ):
_UpperCAmelCase : Dict = np.asarray(lowerCAmelCase__ , dtype=np.floataa )
elif isinstance(lowerCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_UpperCAmelCase : List[str] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_UpperCAmelCase : Any = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
_UpperCAmelCase : Dict = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCAmelCase__ ):
_UpperCAmelCase : Optional[Any] = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
_UpperCAmelCase : Union[str, Any] = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
_UpperCAmelCase : Optional[Any] = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
_UpperCAmelCase : Optional[Any] = np.array(lowerCAmelCase__ ).astype(np.floataa )
# convert into correct format for padding
_UpperCAmelCase : Optional[int] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
_UpperCAmelCase : Dict = np.ones([len(lowerCAmelCase__ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
_UpperCAmelCase : Dict = padded_audio_features * self.padding_value
for i in range(len(lowerCAmelCase__ ) ):
_UpperCAmelCase : List[Any] = audio_features[i]
_UpperCAmelCase : Tuple = feature
# return as BatchFeature
if return_attention_mask:
_UpperCAmelCase : List[Any] = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
_UpperCAmelCase : List[Any] = {"""audio_values""": padded_audio_features}
_UpperCAmelCase : List[str] = BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
return encoded_inputs
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
_lowerCamelCase : List[str] =logging.get_logger(__name__)
_lowerCamelCase : List[Any] =TypeVar("DatasetType", Dataset, IterableDataset)
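# Helpers mirroring `datasets.interleave_datasets` and `datasets.concatenate_datasets`: validate that every
# element is a Dataset or IterableDataset, then dispatch to the map-style or iterable implementation.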
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ = None, lowerCAmelCase_ = None, lowerCAmelCase_ = None, lowerCAmelCase_ = None, lowerCAmelCase_ = "first_exhausted", ):
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(lowerCAmelCase_ ):
if not isinstance(lowerCAmelCase_, (Dataset, IterableDataset) ):
if isinstance(lowerCAmelCase_, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(lowerCAmelCase_ )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(lowerCAmelCase_ ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowerCAmelCase_ ).__name__}.' )
if i == 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =(
(Dataset, IterableDataset) if isinstance(lowerCAmelCase_, lowerCAmelCase_ ) else (IterableDataset, Dataset)
)
elif not isinstance(lowerCAmelCase_, lowerCAmelCase_ ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, info=lowerCAmelCase_, split=lowerCAmelCase_, stopping_strategy=lowerCAmelCase_ )
else:
return _interleave_iterable_datasets(
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, info=lowerCAmelCase_, split=lowerCAmelCase_, stopping_strategy=lowerCAmelCase_ )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ = None, lowerCAmelCase_ = None, lowerCAmelCase_ = 0, ):
"""simple docstring"""
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(lowerCAmelCase_ ):
if not isinstance(lowerCAmelCase_, (Dataset, IterableDataset) ):
if isinstance(lowerCAmelCase_, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(lowerCAmelCase_ )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(lowerCAmelCase_ ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowerCAmelCase_ ).__name__}.' )
if i == 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =(
(Dataset, IterableDataset) if isinstance(lowerCAmelCase_, lowerCAmelCase_ ) else (IterableDataset, Dataset)
)
elif not isinstance(lowerCAmelCase_, lowerCAmelCase_ ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(lowerCAmelCase_, info=lowerCAmelCase_, split=lowerCAmelCase_, axis=lowerCAmelCase_ )
else:
return _concatenate_iterable_datasets(lowerCAmelCase_, info=lowerCAmelCase_, split=lowerCAmelCase_, axis=lowerCAmelCase_ )
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
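# Tests for StableDiffusionUpscalePipeline: fast checks built from tiny dummy components, followed by
# slow GPU integration tests against the stabilityai/stable-diffusion-x4-upscaler checkpoint.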
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =1
SCREAMING_SNAKE_CASE =3
SCREAMING_SNAKE_CASE =(32, 32)
SCREAMING_SNAKE_CASE =floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(snake_case )
return image
@property
def _lowerCAmelCase ( self : Union[str, Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE =UNetaDConditionModel(
block_out_channels=(32, 32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=7 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,attention_head_dim=8 ,use_linear_projection=snake_case ,only_cross_attention=(True, True, False) ,num_class_embeds=100 ,)
return model
@property
def _lowerCAmelCase ( self : Union[str, Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE =AutoencoderKL(
block_out_channels=[32, 32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
return model
@property
def _lowerCAmelCase ( self : Union[str, Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE =CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act='gelu' ,projection_dim=512 ,)
return CLIPTextModel(snake_case )
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE ='cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE =self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE =DDPMScheduler()
SCREAMING_SNAKE_CASE =DDIMScheduler(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE =self.dummy_vae
SCREAMING_SNAKE_CASE =self.dummy_text_encoder
SCREAMING_SNAKE_CASE =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE =self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
SCREAMING_SNAKE_CASE =Image.fromarray(np.uinta(snake_case ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE =StableDiffusionUpscalePipeline(
unet=snake_case ,low_res_scheduler=snake_case ,scheduler=snake_case ,vae=snake_case ,text_encoder=snake_case ,tokenizer=snake_case ,max_noise_level=350 ,)
SCREAMING_SNAKE_CASE =sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
SCREAMING_SNAKE_CASE ='A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE =torch.Generator(device=snake_case ).manual_seed(0 )
SCREAMING_SNAKE_CASE =sd_pipe(
[prompt] ,image=snake_case ,generator=snake_case ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type='np' ,)
SCREAMING_SNAKE_CASE =output.images
SCREAMING_SNAKE_CASE =torch.Generator(device=snake_case ).manual_seed(0 )
SCREAMING_SNAKE_CASE =sd_pipe(
[prompt] ,image=snake_case ,generator=snake_case ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type='np' ,return_dict=snake_case ,)[0]
SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE =image_from_tuple[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
SCREAMING_SNAKE_CASE =np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE ='cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE =self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE =DDPMScheduler()
SCREAMING_SNAKE_CASE =DDIMScheduler(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE =self.dummy_vae
SCREAMING_SNAKE_CASE =self.dummy_text_encoder
SCREAMING_SNAKE_CASE =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE =self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
SCREAMING_SNAKE_CASE =Image.fromarray(np.uinta(snake_case ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE =StableDiffusionUpscalePipeline(
unet=snake_case ,low_res_scheduler=snake_case ,scheduler=snake_case ,vae=snake_case ,text_encoder=snake_case ,tokenizer=snake_case ,max_noise_level=350 ,)
SCREAMING_SNAKE_CASE =sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
SCREAMING_SNAKE_CASE ='A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE =sd_pipe(
2 * [prompt] ,image=2 * [low_res_image] ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type='np' ,)
SCREAMING_SNAKE_CASE =output.images
assert image.shape[0] == 2
SCREAMING_SNAKE_CASE =torch.Generator(device=snake_case ).manual_seed(0 )
SCREAMING_SNAKE_CASE =sd_pipe(
[prompt] ,image=snake_case ,generator=snake_case ,num_images_per_prompt=2 ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type='np' ,)
SCREAMING_SNAKE_CASE =output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' ,'This test requires a GPU' )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE =DDPMScheduler()
SCREAMING_SNAKE_CASE =DDIMScheduler(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE =self.dummy_vae
SCREAMING_SNAKE_CASE =self.dummy_text_encoder
SCREAMING_SNAKE_CASE =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE =self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
SCREAMING_SNAKE_CASE =Image.fromarray(np.uinta(snake_case ) ).convert('RGB' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
SCREAMING_SNAKE_CASE =unet.half()
SCREAMING_SNAKE_CASE =text_encoder.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE =StableDiffusionUpscalePipeline(
unet=snake_case ,low_res_scheduler=snake_case ,scheduler=snake_case ,vae=snake_case ,text_encoder=snake_case ,tokenizer=snake_case ,max_noise_level=350 ,)
SCREAMING_SNAKE_CASE =sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
SCREAMING_SNAKE_CASE ='A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE =sd_pipe(
[prompt] ,image=snake_case ,generator=snake_case ,num_inference_steps=2 ,output_type='np' ,).images
SCREAMING_SNAKE_CASE =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
SCREAMING_SNAKE_CASE =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
SCREAMING_SNAKE_CASE ='stabilityai/stable-diffusion-x4-upscaler'
SCREAMING_SNAKE_CASE =StableDiffusionUpscalePipeline.from_pretrained(snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE ='a cat sitting on a park bench'
SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE =pipe(
prompt=snake_case ,image=snake_case ,generator=snake_case ,output_type='np' ,)
SCREAMING_SNAKE_CASE =output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
SCREAMING_SNAKE_CASE =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
SCREAMING_SNAKE_CASE ='stabilityai/stable-diffusion-x4-upscaler'
SCREAMING_SNAKE_CASE =StableDiffusionUpscalePipeline.from_pretrained(
snake_case ,torch_dtype=torch.floataa ,)
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE ='a cat sitting on a park bench'
SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE =pipe(
prompt=snake_case ,image=snake_case ,generator=snake_case ,output_type='np' ,)
SCREAMING_SNAKE_CASE =output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowerCAmelCase ( self : List[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
SCREAMING_SNAKE_CASE ='stabilityai/stable-diffusion-x4-upscaler'
SCREAMING_SNAKE_CASE =StableDiffusionUpscalePipeline.from_pretrained(
snake_case ,torch_dtype=torch.floataa ,)
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE ='a cat sitting on a park bench'
SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE =pipe(
prompt=snake_case ,image=snake_case ,generator=snake_case ,num_inference_steps=5 ,output_type='np' ,)
SCREAMING_SNAKE_CASE =torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
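# Lazy import structure for the RoBERTa family: tokenizers plus the PyTorch, TensorFlow and Flax
# model classes, each guarded by an availability check.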
lowercase__ : Dict = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[Any] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Union[str, Any] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[Any] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowercase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Archimedes' principle: the buoyant force equals the weight of the displaced fluid.
g = 9.80665  # standard gravity, m/s^2


def buoyant_force(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Return the buoyant force (in newtons) on an object displacing `volume` m^3 of fluid."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
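# Example usage of buoyant_force defined above (illustrative values, not from the original file):
#   buoyant_force(fluid_density=1000, volume=0.5)  # 0.5 m^3 of displaced water -> 4903.325 N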
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
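# Wav2Vec2 processor: wraps a Wav2Vec2FeatureExtractor and a CTC tokenizer behind a single
# __call__ / pad / decode interface, with backward-compatibility shims for older call signatures.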
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
snake_case__ : Tuple = "Wav2Vec2FeatureExtractor"
snake_case__ : Any = "AutoTokenizer"
def __init__( self : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
super().__init__(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.feature_extractor
__SCREAMING_SNAKE_CASE = False
@classmethod
def UpperCAmelCase_ ( cls : Dict , UpperCAmelCase__ : Any , **UpperCAmelCase__ : Dict ) -> List[Any]:
try:
return super().from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
except OSError:
warnings.warn(
F"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: " , UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = WavaVecaCTCTokenizer.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
return cls(feature_extractor=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ )
def __call__( self : List[Any] , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : int ) -> Union[str, Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*UpperCAmelCase__ , **UpperCAmelCase__ )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
__SCREAMING_SNAKE_CASE = kwargs.pop("raw_speech" )
else:
__SCREAMING_SNAKE_CASE = kwargs.pop("audio" , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = kwargs.pop("sampling_rate" , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = kwargs.pop("text" , UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > 0:
__SCREAMING_SNAKE_CASE = args[0]
__SCREAMING_SNAKE_CASE = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
__SCREAMING_SNAKE_CASE = self.feature_extractor(UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__ )
if text is not None:
__SCREAMING_SNAKE_CASE = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__SCREAMING_SNAKE_CASE = encodings["input_ids"]
return inputs
def UpperCAmelCase_ ( self : str , *UpperCAmelCase__ : str , **UpperCAmelCase__ : Optional[Any] ) -> Dict:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*UpperCAmelCase__ , **UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = kwargs.pop("input_features" , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = kwargs.pop("labels" , UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > 0:
__SCREAMING_SNAKE_CASE = args[0]
__SCREAMING_SNAKE_CASE = args[1:]
if input_features is not None:
__SCREAMING_SNAKE_CASE = self.feature_extractor.pad(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ )
if labels is not None:
__SCREAMING_SNAKE_CASE = self.tokenizer.pad(UpperCAmelCase__ , **UpperCAmelCase__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
__SCREAMING_SNAKE_CASE = labels["input_ids"]
return input_features
def UpperCAmelCase_ ( self : str , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Any ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[Any] ) -> str:
return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
@contextmanager
def UpperCAmelCase_ ( self : Any ) -> Any:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = self.tokenizer
yield
__SCREAMING_SNAKE_CASE = self.feature_extractor
__SCREAMING_SNAKE_CASE = False
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
a__ : Dict = logging.get_logger(__name__)
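# Deprecation shim: VideoMAEFeatureExtractor only emits a warning and forwards to VideoMAEImageProcessor.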
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
def __init__( self : str , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Tuple ) -> None:
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead." , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
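# Example usage of the ideal-gas helpers above (illustrative values, not from the original file):
#   pressure_of_gas_system(moles=2, kelvin=300, volume=1.0)  # -> 2 * 300 * 8.314462 = 4988.6772 (Pa)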
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
UpperCAmelCase =True
from torch.cuda.amp import autocast
UpperCAmelCase =logging.getLogger(__name__)
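# Wav2Vec2 self-supervised pretraining script: argument dataclasses, a data collator that samples the
# masked time indices, a Trainer subclass that decays the Gumbel-softmax temperature each step, and main().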
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
_lowerCamelCase = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to log verbose messages or not.'''} , )
_lowerCamelCase = field(
default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} )
_lowerCamelCase = field(
default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} )
_lowerCamelCase = field(
default=0.9_9_9_9_9_5 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} )
def _A ( _a : ModelArguments , _a : TrainingArguments ):
"""simple docstring"""
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
A = logging.WARNING
if model_args.verbose_logging:
A = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
A = logging.INFO
logger.setLevel(_a )
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
_lowerCamelCase = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
_lowerCamelCase = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
_lowerCamelCase = field(
default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. Defaults to \'file\''''} , )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
_lowerCamelCase = field(
default=1 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
_lowerCamelCase = field(
default=2_0.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} )
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = "longest"
_lowerCamelCase = None
_lowerCamelCase = None
def __call__( self ,lowerCamelCase_ ) -> Dict[str, torch.Tensor]:
# reformat list to dict and set to pytorch format
A = self.feature_extractor.pad(
lowerCamelCase_ ,max_length=self.max_length ,padding=self.padding ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors="""pt""" ,)
A = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1] )
A = batch["""input_values"""].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
A = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1 ) ).to(
torch.long )
A = torch.zeros(
(batch_size, mask_indices_seq_length) ,dtype=torch.long ,device=batch["""input_values"""].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
A = 1
A = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
A = _compute_mask_indices(
(batch_size, mask_indices_seq_length) ,self.model.config.mask_time_prob ,self.model.config.mask_time_length ,attention_mask=lowerCamelCase_ ,min_masks=2 ,)
return batch
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ,*lowerCamelCase_ ,lowerCamelCase_=1 ,lowerCamelCase_=0 ,lowerCamelCase_=1.0 ,**lowerCamelCase_ ) -> Union[str, Any]:
super().__init__(*lowerCamelCase_ ,**lowerCamelCase_ )
A = 0
A = max_gumbel_temp
A = min_gumbel_temp
A = gumbel_temp_decay
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> torch.Tensor:
model.train()
A = self._prepare_inputs(lowerCamelCase_ )
if self.use_amp:
with autocast():
A = self.compute_loss(lowerCamelCase_ ,lowerCamelCase_ )
else:
A = self.compute_loss(lowerCamelCase_ ,lowerCamelCase_ )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
A = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
A = loss.sum() / (inputs["""mask_time_indices"""]).sum()
else:
raise ValueError(f'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']' )
if self.args.gradient_accumulation_steps > 1:
A = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowerCamelCase_ ).backward()
elif self.use_apex:
with amp.scale_loss(lowerCamelCase_ ,self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowerCamelCase_ )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) )
return loss.detach()
def _A ( ):
"""simple docstring"""
A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
A , A , A = parser.parse_args_into_dataclasses()
configure_logger(_a , _a )
# Downloading and loading a dataset from the hub.
A = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
A = DatasetDict()
A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}[:{data_args.validation_split_percentage}%]' , cache_dir=model_args.cache_dir , )
A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}[{data_args.validation_split_percentage}%:]' , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
A = DatasetDict()
A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="""validation""" , cache_dir=model_args.cache_dir , )
A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
A = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_a )
def prepare_dataset(_a : Dict ):
# check that all files have the correct sampling rate
A , A = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
A = datasets.map(
_a , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["""train"""].column_names )
# filter audio files that are too long
A = vectorized_datasets.filter(
lambda _a : len(data["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(_a : Optional[Any] ):
return feature_extractor(batch["""speech"""] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
A = vectorized_datasets.map(
_a , batched=_a , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["""train"""].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
A = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"""PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"""
""" ``config.feat_extract_norm='layer'""" )
A = WavaVecaForPreTraining(_a )
A = DataCollatorForWavaVecaPretraining(model=_a , feature_extractor=_a )
A = WavaVecaPreTrainer(
model=_a , data_collator=_a , args=_a , train_dataset=vectorized_datasets["""train"""] , eval_dataset=vectorized_datasets["""validation"""] , tokenizer=_a , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
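# Safety checker used with Stable Diffusion: projects CLIP image embeddings and compares them against
# "special care" and concept embeddings via cosine similarity to flag potentially NSFW images.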
def __lowerCamelCase ( __a : Optional[int] , __a : Any ) -> List[Any]:
_lowercase =nn.functional.normalize(__a )
_lowercase =nn.functional.normalize(__a )
return torch.mm(__a , normalized_text_embeds.t() )
class _a ( lowerCamelCase_ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = CLIPConfig
__SCREAMING_SNAKE_CASE = ['CLIPEncoderLayer']
def __init__( self , lowerCAmelCase_ ):
super().__init__(lowerCAmelCase_ )
_lowercase =CLIPVisionModel(config.vision_config )
_lowercase =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCAmelCase_ )
_lowercase =nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowerCAmelCase_ )
_lowercase =nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowerCAmelCase_ )
_lowercase =nn.Parameter(torch.ones(17 ) , requires_grad=lowerCAmelCase_ )
_lowercase =nn.Parameter(torch.ones(3 ) , requires_grad=lowerCAmelCase_ )
@torch.no_grad()
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
_lowercase =self.vision_model(lowerCAmelCase_ )[1] # pooled_output
_lowercase =self.visual_projection(lowerCAmelCase_ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowercase =cosine_distance(lowerCAmelCase_ , self.special_care_embeds ).cpu().float().numpy()
_lowercase =cosine_distance(lowerCAmelCase_ , self.concept_embeds ).cpu().float().numpy()
_lowercase =[]
_lowercase =image_embeds.shape[0]
for i in range(lowerCAmelCase_ ):
_lowercase ={"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
_lowercase =0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
_lowercase =special_cos_dist[i][concept_idx]
_lowercase =self.special_care_embeds_weights[concept_idx].item()
_lowercase =round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]} )
_lowercase =0.0_1
for concept_idx in range(len(cos_dist[0] ) ):
_lowercase =cos_dist[i][concept_idx]
_lowercase =self.concept_embeds_weights[concept_idx].item()
_lowercase =round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowerCAmelCase_ )
result.append(lowerCAmelCase_ )
_lowercase =[len(res["bad_concepts"] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
_lowercase =self.vision_model(lowerCAmelCase_ )[1] # pooled_output
_lowercase =self.visual_projection(lowerCAmelCase_ )
_lowercase =cosine_distance(lowerCAmelCase_ , self.special_care_embeds )
_lowercase =cosine_distance(lowerCAmelCase_ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_lowercase =0.0
_lowercase =special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
_lowercase =torch.any(special_scores > 0 , dim=1 )
_lowercase =special_care * 0.0_1
_lowercase =special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
_lowercase =(cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
_lowercase =torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
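# TensorFlow OPT tests: a model tester plus common-API checks (past key values, embedding resizing)
# and slow integration/generation tests against the facebook/opt-125m and facebook/opt-350m checkpoints.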
def __lowerCamelCase ( __a : List[Any] , __a : Dict , __a : Optional[Any]=None , __a : List[Any]=None ) -> str:
if attention_mask is None:
_lowercase =tf.cast(tf.math.not_equal(__a , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class _a :
"""simple docstring"""
__SCREAMING_SNAKE_CASE = OPTConfig
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = 'gelu'
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=99 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=4 , lowerCAmelCase_=4 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=20 , lowerCAmelCase_=2 , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=16 , lowerCAmelCase_=16 , ):
_lowercase =parent
_lowercase =batch_size
_lowercase =seq_length
_lowercase =is_training
_lowercase =use_labels
_lowercase =vocab_size
_lowercase =hidden_size
_lowercase =num_hidden_layers
_lowercase =num_attention_heads
_lowercase =intermediate_size
_lowercase =hidden_act
_lowercase =hidden_dropout_prob
_lowercase =attention_probs_dropout_prob
_lowercase =max_position_embeddings
_lowercase =eos_token_id
_lowercase =pad_token_id
_lowercase =bos_token_id
_lowercase =embed_dim
_lowercase =word_embed_proj_dim
_lowercase =False
def __lowerCAmelCase ( self ):
_lowercase =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_lowercase =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_lowercase =tf.concat([input_ids, eos_tensor] , axis=1 )
_lowercase =self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowerCAmelCase_ , **self.config_updates , )
_lowercase =prepare_opt_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ )
return config, inputs_dict
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
_lowercase =TFOPTModel(config=lowerCAmelCase_ )
_lowercase =inputs_dict["input_ids"]
_lowercase =input_ids[:1, :]
_lowercase =inputs_dict["attention_mask"][:1, :]
_lowercase =1
# first forward pass
_lowercase =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
_lowercase , _lowercase =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowercase =ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowercase =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_lowercase =tf.concat([input_ids, next_tokens] , axis=-1 )
_lowercase =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_lowercase =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
_lowercase =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_lowercase =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_lowercase =output_from_no_past[:, -3:, random_slice_idx]
_lowercase =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase_ , lowerCAmelCase_ , rtol=1e-3 )
@require_tf
class _a ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (TFOPTForCausalLM,) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (
{'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = 10
def __lowerCAmelCase ( self ):
_lowercase =TFOPTModelTester(self )
_lowercase =ConfigTester(self , config_class=lowerCAmelCase_ )
def __lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ):
_lowercase =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ )
def __lowerCAmelCase ( self ):
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowerCAmelCase_ , lowerCAmelCase_ ):
if hasattr(lowerCAmelCase_ , "weight" ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowerCAmelCase_ , "weight" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
_lowercase =model_class(config=lowerCAmelCase_ )
_lowercase =_get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
_lowercase =_get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowerCAmelCase_ )
_lowercase =_get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
_lowercase =_get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
_lowercase =size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowerCAmelCase_ )
# check that weights remain the same after resizing
_lowercase =True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_lowercase =False
self.assertTrue(lowerCAmelCase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowerCAmelCase_ )
_lowercase =True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_lowercase =False
self.assertTrue(lowerCAmelCase_ )
def __lowerCamelCase ( __a : Tuple ) -> Dict:
return tf.constant(__a , dtype=tf.intaa )
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 99
def __lowerCAmelCase ( self ):
_lowercase =tf.ones((4, 1) , dtype=tf.intaa ) * 2
_lowercase =tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
_lowercase =input_ids.shape[0]
_lowercase =OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowerCAmelCase ( self ):
_lowercase =TFOPTModel.from_pretrained("facebook/opt-350m" )
_lowercase =_long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_lowercase =tf.not_equal(lowerCAmelCase_ , model.config.pad_token_id )
with tf.GradientTape():
_lowercase =model(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ).last_hidden_state
_lowercase =(1, 11, 512)
self.assertEqual(output.shape , lowerCAmelCase_ )
_lowercase =tf.constant(
[[-0.2_8_7_3, -1.9_2_1_8, -0.3_0_3_3], [-1.2_7_1_0, -0.1_3_3_8, -0.1_9_0_2], [0.4_0_9_5, 0.1_2_1_4, -1.3_1_2_1]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-3 ) )
_lowercase =tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
_lowercase =xla_generate(lowerCAmelCase_ , lowerCAmelCase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-2 ) )
@require_tf
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ):
super().setUp()
_lowercase ="facebook/opt-350m"
def __lowerCAmelCase ( self ):
_lowercase =TFOPTForCausalLM.from_pretrained(self.path_model )
_lowercase =GPTaTokenizer.from_pretrained(self.path_model )
_lowercase =[
"Today is a beautiful day and I want to",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
_lowercase =tokenizer(lowerCAmelCase_ , return_tensors="tf" , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_lowercase =tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
_lowercase =tf.constant(
[
[1.3_8_5_1, -1_3.8_9_2_3, -1_0.5_2_2_9, -1_0.7_5_3_3, -0.2_3_0_9, -1_0.2_3_8_4, -0.5_3_6_5, -9.0_9_4_7, -5.1_6_7_0],
[-4.7_0_7_3, -1_0.6_2_7_6, -3.9_4_1_5, -2_1.5_2_4_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2],
[0.6_2_4_7, -3.4_2_2_9, -8.9_1_7_9, -1.4_2_9_7, -1_4.1_6_5_0, 1.4_1_4_6, -9.0_2_1_8, -0.2_7_0_3, -0.2_7_0_3],
[6.4_7_8_3, -1.9_9_1_3, -1_0.7_9_2_6, -2.3_3_3_6, 1.5_0_9_2, -0.9_9_7_4, -6.8_2_1_3, 1.3_4_7_7, 1.3_4_7_7],
] )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
_lowercase =tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
_lowercase =tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
@require_tf
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def __lowerCAmelCase ( self ):
_lowercase ="facebook/opt-125m"
_lowercase =[
"Today is a beautiful day and I want to",
"In the city of New York, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
_lowercase =[]
_lowercase =GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
_lowercase =TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
for prompt in self.prompts:
_lowercase =tokenizer(lowerCAmelCase_ , return_tensors="tf" ).input_ids
_lowercase =model.generate(lowerCAmelCase_ , max_length=10 )
_lowercase =tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self ):
_lowercase ="facebook/opt-350m"
_lowercase =GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
_lowercase =TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
_lowercase ="left"
# use different length sentences to test batching
_lowercase =[
"Hello, my dog is a little",
"Today, I",
]
_lowercase =tokenizer(lowerCAmelCase_ , return_tensors="tf" , padding=lowerCAmelCase_ )
_lowercase =inputs["input_ids"]
_lowercase =model.generate(input_ids=lowerCAmelCase_ , attention_mask=inputs["attention_mask"] )
_lowercase =tokenizer(sentences[0] , return_tensors="tf" ).input_ids
_lowercase =model.generate(input_ids=lowerCAmelCase_ )
_lowercase =inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["attention_mask"][-1] , tf.intaa ) )
_lowercase =tokenizer(sentences[1] , return_tensors="tf" ).input_ids
_lowercase =model.generate(input_ids=lowerCAmelCase_ , max_length=model.config.max_length - num_paddings )
_lowercase =tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
_lowercase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase_ )
_lowercase =tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase_ )
_lowercase =[
"Hello, my dog is a little bit of a dork.\nI'm a little bit",
"Today, I was in the middle of a conversation with a friend about the",
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , [non_padded_sentence, padded_sentence] )
def __lowerCAmelCase ( self ):
_lowercase ="facebook/opt-350m"
_lowercase =[
"Today is a beautiful day and I want to",
"In the city of San Francisco, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
_lowercase =[]
_lowercase =GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
_lowercase =TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
for prompt in self.prompts:
_lowercase =tokenizer(lowerCAmelCase_ , return_tensors="tf" ).input_ids
_lowercase =model.generate(lowerCAmelCase_ , max_length=10 )
_lowercase =tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def solution(max_perimeter: int = 10**9) -> int:
    """Sum every perimeter produced by the recurrence below that does not exceed `max_perimeter`."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F'''{solution() = }''')
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase: Tuple = logging.get_logger(__name__)
lowerCAmelCase: Dict = '▁'
lowerCAmelCase: Dict = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
lowerCAmelCase: int = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
lowerCAmelCase: Any = {
'facebook/s2t-small-librispeech-asr': 1_0_2_4,
}
lowerCAmelCase: Optional[int] = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
lowerCAmelCase: List[Any] = {'mustc': MUSTC_LANGS}
class a__( lowerCamelCase__ ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = MAX_MODEL_INPUT_SIZES
lowercase__ = ["""input_ids""", """attention_mask"""]
lowercase__ = []
def __init__( self : int , __snake_case : int , __snake_case : Optional[Any] , __snake_case : Optional[Any]="<s>" , __snake_case : Optional[int]="</s>" , __snake_case : Union[str, Any]="<pad>" , __snake_case : str="<unk>" , __snake_case : Dict=False , __snake_case : int=False , __snake_case : str=None , __snake_case : Optional[int]=None , __snake_case : Optional[Dict[str, Any]] = None , **__snake_case : Union[str, Any] , ):
a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , do_upper_case=__snake_case , do_lower_case=__snake_case , tgt_lang=__snake_case , lang_codes=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
a : Tuple = do_upper_case
a : Optional[Any] = do_lower_case
a : List[str] = load_json(__snake_case )
a : Dict = {v: k for k, v in self.encoder.items()}
a : int = spm_file
a : Tuple = load_spm(__snake_case , self.sp_model_kwargs )
if lang_codes is not None:
a : Any = lang_codes
a : str = LANGUAGES[lang_codes]
a : Tuple = [F"""<lang:{lang}>""" for lang in self.langs]
a : str = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
a : Optional[Any] = self.lang_tokens
a : Union[str, Any] = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
a : List[str] = {}
@property
def lowercase_ ( self : Optional[Any] ):
return len(self.encoder )
@property
def lowercase_ ( self : int ):
return self._tgt_lang
@tgt_lang.setter
def lowercase_ ( self : int , __snake_case : Optional[int] ):
a : Union[str, Any] = new_tgt_lang
self.set_tgt_lang_special_tokens(__snake_case )
def lowercase_ ( self : str , __snake_case : str ):
a : int = self.lang_code_to_id[tgt_lang]
a : int = [lang_code_id]
def lowercase_ ( self : Optional[int] , __snake_case : str ):
return self.sp_model.encode(__snake_case , out_type=__snake_case )
def lowercase_ ( self : List[str] , __snake_case : List[Any] ):
return self.encoder.get(__snake_case , self.encoder[self.unk_token] )
def lowercase_ ( self : List[Any] , __snake_case : int ):
return self.decoder.get(__snake_case , self.unk_token )
def lowercase_ ( self : Union[str, Any] , __snake_case : List[str] ):
a : List[Any] = []
a : List[Any] = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
a : Union[str, Any] = self.sp_model.decode(__snake_case )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
a : Optional[int] = []
else:
current_sub_tokens.append(__snake_case )
a : Tuple = self.sp_model.decode(__snake_case )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowercase_ ( self : int , __snake_case : List[Any] , __snake_case : List[str]=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase_ ( self : Optional[int] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
a : Optional[int] = [1] * len(self.prefix_tokens )
a : Any = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(__snake_case )) + suffix_ones
return prefix_ones + ([0] * len(__snake_case )) + ([0] * len(__snake_case )) + suffix_ones
def lowercase_ ( self : Union[str, Any] ):
a : Union[str, Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ):
a : List[str] = self.__dict__.copy()
a : Union[str, Any] = None
return state
def __setstate__( self : str , __snake_case : Dict ):
a : Dict = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
a : int = {}
a : Any = load_spm(self.spm_file , self.sp_model_kwargs )
def lowercase_ ( self : str , __snake_case : str , __snake_case : Optional[str] = None ):
a : Union[str, Any] = Path(__snake_case )
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
a : Any = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
a : List[Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , __snake_case )
if os.path.abspath(self.spm_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __snake_case )
elif not os.path.isfile(self.spm_file ):
with open(__snake_case , 'wb' ) as fi:
a : Tuple = self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (str(__snake_case ), str(__snake_case ))
def load_spm(path, sp_model_kwargs):
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path):
    with open(path, 'r') as f:
        return json.load(f)
def save_json(data, path):
    with open(path, 'w') as f:
        json.dump(data, f, indent=2)
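# --- Added illustration ------------------------------------------------------------------
# The JSON helpers above are thin wrappers around the standard library, so saving a
# vocabulary and loading it back should round-trip exactly. A tiny self-contained check
# (the file name below is arbitrary):
def _json_roundtrip_check(path='vocab_roundtrip.json'):
    vocab = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
    save_json(vocab, path)
    assert load_json(path) == vocab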
| 526
| 0
|
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data):
    # Split the dataset dict into features and target
    return (data["data"], data["target"])
def xgboost(features, target, test_features):
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main():
    # The California housing dataset ships with scikit-learn
    california = fetch_california_housing()
    data, target = data_handling(california )
    x_train, x_test, y_train, y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test , predictions )}" )
    print(f"Mean Square Error : {mean_squared_error(y_test , predictions )}" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 156
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase_ : Dict = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class __lowerCAmelCase :
snake_case : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
snake_case : Optional[str] = field(
default=__a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
snake_case : Optional[str] = field(
default=__a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
snake_case : Optional[str] = field(
default=__a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __lowerCAmelCase :
snake_case : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
snake_case : str = field(metadata={"""help""": """Should contain the data files for the task."""} )
snake_case : int = field(
default=1_2_8 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
snake_case : bool = field(
default=__a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def __A ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCAmelCase_ )
# Set seed
set_seed(training_args.seed )
try:
_UpperCAmelCase : Union[str, Any] = processors[data_args.task_name]()
_UpperCAmelCase : int = processor.get_labels()
_UpperCAmelCase : Optional[int] = len(lowerCAmelCase_ )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase : List[str] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCAmelCase_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
_UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCAmelCase : Optional[int] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , )
# Get datasets
_UpperCAmelCase : int = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCAmelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_UpperCAmelCase : Tuple = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCAmelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
_UpperCAmelCase : List[str] = DataCollatorWithPadding(lowerCAmelCase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_UpperCAmelCase : List[Any] = Trainer(
model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=lowerCAmelCase_ , eval_dataset=lowerCAmelCase_ , compute_metrics=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCAmelCase : Any = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_UpperCAmelCase : int = trainer.evaluate()
_UpperCAmelCase : List[str] = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(lowerCAmelCase_ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , lowerCAmelCase_ , lowerCAmelCase_ )
writer.write("""%s = %s\n""" % (key, value) )
results.update(lowerCAmelCase_ )
return results
def __A ( lowerCAmelCase_ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
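# --- Added usage note (illustrative only; the script name, task name and paths below are
# assumptions, not taken from the repository) ---------------------------------------------
# The script is driven by HfArgumentParser flags that mirror the dataclasses above, e.g.:
#
#   python run_multiple_choice.py \
#       --task_name swag \
#       --model_name_or_path bert-base-uncased \
#       --data_dir ./data/swag \
#       --output_dir ./out/swag-bert \
#       --max_seq_length 128 \
#       --do_train --do_eval
#
# `--task_name` must be one of the keys exposed by `utils_multiple_choice.processors`.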
| 156
| 1
|
def solution(n: int = 100) -> int:
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
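# Added brute-force cross-check (illustration only): compute both quantities directly and
# compare them with the closed-form version above (25164150 for n == 100).
def _brute_force_check(n: int = 100) -> None:
    square_of_sum = sum(range(1, n + 1)) ** 2
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    assert solution(n) == square_of_sum - sum_of_squares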
if __name__ == "__main__":
print(f"""{solution() = }""")
| 192
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase: Dict = logging.get_logger(__name__)
def _lowerCamelCase ( snake_case ):
_lowerCAmelCase = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
_lowerCAmelCase = [144, 192, 240]
_lowerCAmelCase = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
_lowerCAmelCase = [96, 120, 144]
_lowerCAmelCase = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
_lowerCAmelCase = [64, 80, 96]
_lowerCAmelCase = [16, 16, 24, 48, 64, 80, 320]
_lowerCAmelCase = 0.05
_lowerCAmelCase = 2.0
if mobilevit_name.startswith('deeplabv3_' ):
_lowerCAmelCase = 512
_lowerCAmelCase = 16
_lowerCAmelCase = 21
_lowerCAmelCase = 'pascal-voc-id2label.json'
else:
_lowerCAmelCase = 1_000
_lowerCAmelCase = 'imagenet-1k-id2label.json'
_lowerCAmelCase = 'huggingface/label-files'
_lowerCAmelCase = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
_lowerCAmelCase = {int(snake_case ): v for k, v in idalabel.items()}
_lowerCAmelCase = idalabel
_lowerCAmelCase = {v: k for k, v in idalabel.items()}
return config
def _lowerCamelCase ( snake_case , snake_case=False ):
for i in range(1 , 6 ):
if F'layer_{i}.' in name:
_lowerCAmelCase = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.' )
if "conv_1." in name:
_lowerCAmelCase = name.replace('conv_1.' , 'conv_stem.' )
if ".block." in name:
_lowerCAmelCase = name.replace('.block.' , '.' )
if "exp_1x1" in name:
_lowerCAmelCase = name.replace('exp_1x1' , 'expand_1x1' )
if "red_1x1" in name:
_lowerCAmelCase = name.replace('red_1x1' , 'reduce_1x1' )
if ".local_rep.conv_3x3." in name:
_lowerCAmelCase = name.replace('.local_rep.conv_3x3.' , '.conv_kxk.' )
if ".local_rep.conv_1x1." in name:
_lowerCAmelCase = name.replace('.local_rep.conv_1x1.' , '.conv_1x1.' )
if ".norm." in name:
_lowerCAmelCase = name.replace('.norm.' , '.normalization.' )
if ".conv." in name:
_lowerCAmelCase = name.replace('.conv.' , '.convolution.' )
if ".conv_proj." in name:
_lowerCAmelCase = name.replace('.conv_proj.' , '.conv_projection.' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F'.{i}.{j}.' in name:
_lowerCAmelCase = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.' )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F'.{i}.{j}.' in name:
_lowerCAmelCase = name.replace(F'.{i}.{j}.' , F'.{i}.' )
if "expand_1x1" in name:
_lowerCAmelCase = name.replace('expand_1x1' , 'downsampling_layer.expand_1x1' )
if "conv_3x3" in name:
_lowerCAmelCase = name.replace('conv_3x3' , 'downsampling_layer.conv_3x3' )
if "reduce_1x1" in name:
_lowerCAmelCase = name.replace('reduce_1x1' , 'downsampling_layer.reduce_1x1' )
for i in range(2 , 5 ):
if F'.global_rep.{i}.weight' in name:
_lowerCAmelCase = name.replace(F'.global_rep.{i}.weight' , '.layernorm.weight' )
if F'.global_rep.{i}.bias' in name:
_lowerCAmelCase = name.replace(F'.global_rep.{i}.bias' , '.layernorm.bias' )
if ".global_rep." in name:
_lowerCAmelCase = name.replace('.global_rep.' , '.transformer.' )
if ".pre_norm_mha.0." in name:
_lowerCAmelCase = name.replace('.pre_norm_mha.0.' , '.layernorm_before.' )
if ".pre_norm_mha.1.out_proj." in name:
_lowerCAmelCase = name.replace('.pre_norm_mha.1.out_proj.' , '.attention.output.dense.' )
if ".pre_norm_ffn.0." in name:
_lowerCAmelCase = name.replace('.pre_norm_ffn.0.' , '.layernorm_after.' )
if ".pre_norm_ffn.1." in name:
_lowerCAmelCase = name.replace('.pre_norm_ffn.1.' , '.intermediate.dense.' )
if ".pre_norm_ffn.4." in name:
_lowerCAmelCase = name.replace('.pre_norm_ffn.4.' , '.output.dense.' )
if ".transformer." in name:
_lowerCAmelCase = name.replace('.transformer.' , '.transformer.layer.' )
if ".aspp_layer." in name:
_lowerCAmelCase = name.replace('.aspp_layer.' , '.' )
if ".aspp_pool." in name:
_lowerCAmelCase = name.replace('.aspp_pool.' , '.' )
if "seg_head." in name:
_lowerCAmelCase = name.replace('seg_head.' , 'segmentation_head.' )
if "segmentation_head.classifier.classifier." in name:
_lowerCAmelCase = name.replace('segmentation_head.classifier.classifier.' , 'segmentation_head.classifier.' )
if "classifier.fc." in name:
_lowerCAmelCase = name.replace('classifier.fc.' , 'classifier.' )
elif (not base_model) and ("segmentation_head." not in name):
_lowerCAmelCase = 'mobilevit.' + name
return name
def _lowerCamelCase ( snake_case , snake_case , snake_case=False ):
if base_model:
_lowerCAmelCase = ''
else:
_lowerCAmelCase = 'mobilevit.'
for key in orig_state_dict.copy().keys():
_lowerCAmelCase = orig_state_dict.pop(snake_case )
if key[:8] == "encoder.":
_lowerCAmelCase = key[8:]
if "qkv" in key:
_lowerCAmelCase = key.split('.' )
_lowerCAmelCase = int(key_split[0][6:] ) - 1
_lowerCAmelCase = int(key_split[3] )
_lowerCAmelCase = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}' )
_lowerCAmelCase = layer.transformer.layer[transformer_num].attention.attention.all_head_size
_lowerCAmelCase = (
F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
if "weight" in key:
_lowerCAmelCase = val[:dim, :]
_lowerCAmelCase = val[dim : dim * 2, :]
_lowerCAmelCase = val[-dim:, :]
else:
_lowerCAmelCase = val[:dim]
_lowerCAmelCase = val[dim : dim * 2]
_lowerCAmelCase = val[-dim:]
else:
_lowerCAmelCase = val
return orig_state_dict
def _lowerCamelCase ( ):
_lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCAmelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw )
return im
@torch.no_grad()
def _lowerCamelCase ( snake_case , snake_case , snake_case , snake_case=False ):
_lowerCAmelCase = get_mobilevit_config(snake_case )
# load original state_dict
_lowerCAmelCase = torch.load(snake_case , map_location='cpu' )
# load 🤗 model
if mobilevit_name.startswith('deeplabv3_' ):
_lowerCAmelCase = MobileViTForSemanticSegmentation(snake_case ).eval()
else:
_lowerCAmelCase = MobileViTForImageClassification(snake_case ).eval()
_lowerCAmelCase = convert_state_dict(snake_case , snake_case )
model.load_state_dict(snake_case )
# Check outputs on an image, prepared by MobileViTImageProcessor
_lowerCAmelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_lowerCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' )
_lowerCAmelCase = model(**snake_case )
_lowerCAmelCase = outputs.logits
if mobilevit_name.startswith('deeplabv3_' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
_lowerCAmelCase = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
_lowerCAmelCase = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
_lowerCAmelCase = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}' )
assert torch.allclose(logits[0, :3, :3, :3] , snake_case , atol=1E-4 )
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
_lowerCAmelCase = torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
elif mobilevit_name == "mobilevit_xs":
_lowerCAmelCase = torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
elif mobilevit_name == "mobilevit_xxs":
_lowerCAmelCase = torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}' )
assert torch.allclose(logits[0, :3] , snake_case , atol=1E-4 )
Path(snake_case ).mkdir(exist_ok=snake_case )
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case )
if push_to_hub:
_lowerCAmelCase = {
'mobilevit_s': 'mobilevit-small',
'mobilevit_xs': 'mobilevit-x-small',
'mobilevit_xxs': 'mobilevit-xx-small',
'deeplabv3_mobilevit_s': 'deeplabv3-mobilevit-small',
'deeplabv3_mobilevit_xs': 'deeplabv3-mobilevit-x-small',
'deeplabv3_mobilevit_xxs': 'deeplabv3-mobilevit-xx-small',
}
print('Pushing to the hub...' )
_lowerCAmelCase = model_mapping[mobilevit_name]
image_processor.push_to_hub(snake_case , organization='apple' )
model.push_to_hub(snake_case , organization='apple' )
if __name__ == "__main__":
_lowercase: Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_lowercase: List[str] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
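# --- Added usage note (illustrative only; the script and checkpoint file names below are
# assumptions, not taken from the repository) ---------------------------------------------
# The converter is meant to be run from the command line, along the lines of:
#
#   python convert_mobilevit_checkpoint.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small \
#       --push_to_hub
#
# `--mobilevit_name` selects the architecture variant configured above, and the
# `deeplabv3_*` names switch the target model to semantic segmentation.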
| 192
| 1
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase (unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
        model = AutoModelForSeq2SeqLM.from_pretrained('''google/mt5-small''' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
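    # --- Added note (illustration) --------------------------------------------------------
    # The expected value above is a total log-likelihood: the model returns the mean
    # cross-entropy per label token, so the test converts it back with
    #     mtf_score = -(num_label_tokens * mean_loss)
    # before comparing it against the score reported by the original (Mesh TensorFlow) T5
    # codebase, hence the "mtf" prefix.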
| 47
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCamelCase (unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Any = TFAutoModel.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : str = AutoModel.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Optional[Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = AutoModelForPreTraining.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Optional[int] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case , _snake_case : Tuple = TFAutoModelForCausalLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case , _snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case , _snake_case : List[str] = TFAutoModelForMaskedLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : int = AutoModelForMaskedLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case , _snake_case : Optional[int] = AutoModelForMaskedLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case , _snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case , _snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Any = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Any = TFAutoModelForSequenceClassification.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Dict = AutoModelForSequenceClassification.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : str = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
_snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
_snake_case : Tuple = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
_snake_case : List[str] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
_snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
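# --- Added illustration (assumes both PyTorch and TensorFlow are installed and that the
# public "bert-base-uncased" checkpoint is reachable) --------------------------------------
# Every check above follows the same cross-framework pattern: load a PyTorch checkpoint
# into a TF class with from_pt=True, or a TF checkpoint into a PyTorch class with
# from_tf=True. Reduced to a single sketch:
def _cross_framework_loading_sketch():
    tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)
    pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)
    return tf_model, pt_model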
| 47
| 1
|
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
lowercase = datasets.logging.get_logger(__name__)
lowercase = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
lowercase = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
lowercase = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence" ),
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
    def _download_and_prepare( self , dl_manager ):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def _compute( self , sources , predictions , references , gpus=None , progress_bar=False ):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        scores , mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
| 211
|
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCamelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
super().__init__()
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=a__ , speech_processor=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , unet=a__ , scheduler=a__ , feature_extractor=a__ , )
def a_ ( self , a__ = "auto" ):
if slice_size == "auto":
__SCREAMING_SNAKE_CASE : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a__ )
def a_ ( self ):
self.enable_attention_slicing(a__ )
@torch.no_grad()
def __call__( self , a__ , a__=16000 , a__ = 512 , a__ = 512 , a__ = 50 , a__ = 7.5 , a__ = None , a__ = 1 , a__ = 0.0 , a__ = None , a__ = None , a__ = "pil" , a__ = True , a__ = None , a__ = 1 , **a__ , ):
__SCREAMING_SNAKE_CASE : Optional[Any] = self.speech_processor.feature_extractor(
a__ , return_tensors="pt" , sampling_rate=a__ ).input_features.to(self.device )
__SCREAMING_SNAKE_CASE : Optional[int] = self.speech_model.generate(a__ , max_length=480000 )
__SCREAMING_SNAKE_CASE : List[Any] = self.speech_processor.tokenizer.batch_decode(a__ , skip_special_tokens=a__ , normalize=a__ )[
0
]
if isinstance(a__ , a__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1
elif isinstance(a__ , a__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = len(a__ )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(a__ )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(a__ , a__ ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(a__ )}.' )
# get prompt text embeddings
__SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(
a__ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
__SCREAMING_SNAKE_CASE : Tuple = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__SCREAMING_SNAKE_CASE : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = text_embeddings.shape
__SCREAMING_SNAKE_CASE : int = text_embeddings.repeat(1 , a__ , 1 )
__SCREAMING_SNAKE_CASE : Optional[int] = text_embeddings.view(bs_embed * num_images_per_prompt , a__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__SCREAMING_SNAKE_CASE : str = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__SCREAMING_SNAKE_CASE : List[str]
if negative_prompt is None:
__SCREAMING_SNAKE_CASE : Any = [""] * batch_size
elif type(a__ ) is not type(a__ ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(a__ )} !='
f' {type(a__ )}.' )
elif isinstance(a__ , a__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = [negative_prompt]
elif batch_size != len(a__ ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(a__ )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
" the batch size of `prompt`." )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = negative_prompt
__SCREAMING_SNAKE_CASE : Optional[int] = text_input_ids.shape[-1]
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(
a__ , padding="max_length" , max_length=a__ , truncation=a__ , return_tensors="pt" , )
__SCREAMING_SNAKE_CASE : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__SCREAMING_SNAKE_CASE : Dict = uncond_embeddings.shape[1]
__SCREAMING_SNAKE_CASE : int = uncond_embeddings.repeat(1 , a__ , 1 )
__SCREAMING_SNAKE_CASE : Any = uncond_embeddings.view(batch_size * num_images_per_prompt , a__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__SCREAMING_SNAKE_CASE : Dict = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__SCREAMING_SNAKE_CASE : Dict = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__SCREAMING_SNAKE_CASE : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__SCREAMING_SNAKE_CASE : Optional[int] = torch.randn(a__ , generator=a__ , device="cpu" , dtype=a__ ).to(
self.device )
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(a__ , generator=a__ , device=self.device , dtype=a__ )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
__SCREAMING_SNAKE_CASE : Dict = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(a__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__SCREAMING_SNAKE_CASE : List[str] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__SCREAMING_SNAKE_CASE : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__SCREAMING_SNAKE_CASE : str = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if accepts_eta:
__SCREAMING_SNAKE_CASE : Dict = eta
for i, t in enumerate(self.progress_bar(a__ ) ):
# expand the latents if we are doing classifier free guidance
__SCREAMING_SNAKE_CASE : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler.scale_model_input(a__ , a__ )
# predict the noise residual
__SCREAMING_SNAKE_CASE : List[Any] = self.unet(a__ , a__ , encoder_hidden_states=a__ ).sample
# perform guidance
if do_classifier_free_guidance:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = noise_pred.chunk(2 )
__SCREAMING_SNAKE_CASE : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__SCREAMING_SNAKE_CASE : Any = self.scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(a__ , a__ , a__ )
__SCREAMING_SNAKE_CASE : Any = 1 / 0.18215 * latents
__SCREAMING_SNAKE_CASE : Optional[Any] = self.vae.decode(a__ ).sample
__SCREAMING_SNAKE_CASE : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__SCREAMING_SNAKE_CASE : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__SCREAMING_SNAKE_CASE : int = self.numpy_to_pil(a__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=a__ , nsfw_content_detected=a__ )
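# --- Added illustration (not part of the pipeline) ----------------------------------------
# The heart of the guidance step inside the denoising loop above is the linear extrapolation
#     noise = noise_uncond + guidance_scale * (noise_text - noise_uncond),
# where guidance_scale == 1.0 reduces to the plain text-conditioned prediction.
# A tiny self-contained sketch with dummy tensors (torch is already imported in this file):
def _classifier_free_guidance_sketch(guidance_scale: float = 7.5) -> torch.Tensor:
    noise_pred_uncond = torch.zeros(1, 4, 64, 64)
    noise_pred_text = torch.ones(1, 4, 64, 64)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)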
| 211
| 1
|
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
A__ : int = logging.get_logger(__name__)
A__ : Dict[Optional[str], Type[Formatter]] = {}
A__ : Dict[Optional[str], str] = {}
A__ : Dict[Optional[str], Exception] = {}
def a__ ( lowerCAmelCase : type , lowerCAmelCase : Optional[str] , lowerCAmelCase : Optional[List[str]] = None , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
F"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
UpperCAmelCase__ : Union[str, Any] = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
F"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
UpperCAmelCase__ : Union[str, Any] = format_type
def a__ ( lowerCAmelCase : Exception , lowerCAmelCase : Optional[str] , lowerCAmelCase : Optional[List[str]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Dict = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
UpperCAmelCase__ : Any = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["""python"""])
_register_formatter(ArrowFormatter, """arrow""", aliases=["""pa""", """pyarrow"""])
_register_formatter(NumpyFormatter, """numpy""", aliases=["""np"""])
_register_formatter(PandasFormatter, """pandas""", aliases=["""pd"""])
_register_formatter(CustomFormatter, """custom""")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, """torch""", aliases=["""pt""", """pytorch"""])
else:
A__ : Union[str, Any] = ValueError("""PyTorch needs to be installed to be able to return PyTorch tensors.""")
_register_unavailable_formatter(_torch_error, """torch""", aliases=["""pt""", """pytorch"""])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, """tensorflow""", aliases=["""tf"""])
else:
A__ : Union[str, Any] = ValueError("""Tensorflow needs to be installed to be able to return Tensorflow tensors.""")
_register_unavailable_formatter(_tf_error, """tensorflow""", aliases=["""tf"""])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, """jax""", aliases=[])
else:
A__ : Any = ValueError("""JAX needs to be installed to be able to return JAX arrays.""")
_register_unavailable_formatter(_jax_error, """jax""", aliases=[])
def a__ ( lowerCAmelCase : Optional[str] ):
'''simple docstring'''
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def a__ ( lowerCAmelCase : Optional[str] , **lowerCAmelCase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = get_format_type_from_alias(lowerCAmelCase )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**lowerCAmelCase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
F"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
| 660
|
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ''
_A = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> str:
super().__init__(self , **__UpperCamelCase )
UpperCAmelCase__ : int = repo_info
UpperCAmelCase__ : Optional[int] = token
UpperCAmelCase__ : Optional[Any] = None
def lowerCAmelCase__ ( self )-> Optional[Any]:
if self.dir_cache is None:
UpperCAmelCase__ : str = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase__ : str = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"name": str(__UpperCamelCase ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , )-> List[Any]:
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
UpperCAmelCase__ : Union[str, Any] = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def lowerCAmelCase__ ( self , __UpperCamelCase , **__UpperCamelCase )-> List[str]:
self._get_dirs()
UpperCAmelCase__ : Union[str, Any] = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase )-> str:
self._get_dirs()
UpperCAmelCase__ : str = PurePosixPath(path.strip("/" ) )
UpperCAmelCase__ : Optional[Any] = {}
for p, f in self.dir_cache.items():
UpperCAmelCase__ : Optional[int] = PurePosixPath(p.strip("/" ) )
UpperCAmelCase__ : Dict = p.parent
if root == path:
UpperCAmelCase__ : Tuple = f
UpperCAmelCase__ : List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
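# --- Added illustration -------------------------------------------------------------------
# The directory cache above is built from the repo's flat file list by walking
# PurePosixPath.parents and dropping the trailing "." entry. A quick standalone check
# (the file name is arbitrary):
def _parents_sketch():
    from pathlib import PurePosixPath
    parents = list(PurePosixPath("data/train/part-00000.parquet").parents)[:-1]
    assert [str(p) for p in parents] == ["data/train", "data"]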
| 660
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _UpperCamelCase ( metaclass=lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = ["""note_seq"""]
def __init__( self : List[Any] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Optional[Any]):
'''simple docstring'''
requires_backends(self , ['note_seq'])
@classmethod
def __lowerCamelCase ( cls : Dict , *_lowerCAmelCase : str , **_lowerCAmelCase : Tuple):
'''simple docstring'''
requires_backends(cls , ['note_seq'])
@classmethod
def __lowerCamelCase ( cls : List[str] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : List[str]):
'''simple docstring'''
requires_backends(cls , ['note_seq'])
| 474
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE_ = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 4
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = '''left'''
def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs = None , **kwargs , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
self._pad_token_type_id = 3
self.do_lower_case = do_lower_case
self.remove_space = remove_space
self.keep_accents = keep_accents
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(vocab_file)
@property
def vocab_size( self) -> int:
return len(self.sp_model)
def get_vocab( self) -> Dict[str, int]:
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Any:
state = self.__dict__.copy()
state['''sp_model'''] = None
return state
def __setstate__( self , lowerCamelCase_) -> str:
self.__dict__ = lowerCamelCase_
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def preprocess_text( self , inputs) -> str:
if self.remove_space:
outputs = ''' '''.join(inputs.strip().split())
else:
outputs = inputs
outputs = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
outputs = unicodedata.normalize('''NFKD''' , outputs)
outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c)])
if self.do_lower_case:
outputs = outputs.lower()
return outputs
def _tokenize( self , text) -> List[str]:
text = self.preprocess_text(text)
pieces = self.sp_model.encode(text , out_type=str)
new_pieces = []
for piece in pieces:
if len(piece) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
cur_pieces = cur_pieces[1:]
else:
cur_pieces[0] = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(cur_pieces)
else:
new_pieces.append(piece)
return new_pieces
def _convert_token_to_id( self , token) -> int:
return self.sp_model.PieceToId(token)
def _convert_id_to_token( self , index) -> str:
return self.sp_model.IdToPiece(index)
def convert_tokens_to_string( self , tokens) -> str:
out_string = ''''''.join(tokens).replace(SPIECE_UNDERLINE , ''' ''').strip()
return out_string
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = True , **lowerCamelCase_ , ) -> str:
UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , lowerCamelCase_)
UpperCamelCase = self.convert_ids_to_tokens(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCamelCase = []
UpperCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
UpperCamelCase = []
sub_texts.append(lowerCamelCase_)
else:
current_sub_text.append(lowerCamelCase_)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCamelCase = ''''''.join(lowerCamelCase_)
UpperCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCamelCase = self.clean_up_tokenization(lowerCamelCase_)
return clean_text
else:
return text
def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return token_ids_0 + sep + cls
return token_ids_0 + sep + token_ids_1 + sep + cls
def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens)
if token_ids_1 is not None:
return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
return ([0] * len(token_ids_0)) + [1, 1]
def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None) -> List[int]:
sep = [self.sep_token_id]
cls_segment_id = [2]
if token_ids_1 is None:
return len(token_ids_0 + sep) * [0] + cls_segment_id
return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
def save_vocabulary( self , save_directory , filename_prefix = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file , '''wb''') as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
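# --- Added usage sketch (not part of the original file). It assumes a local
# --- SentencePiece model at "spiece.model"; the file name and the sample sentence
# --- are illustrative only.
# tokenizer = XLNetTokenizer(vocab_file="spiece.model")
# pieces = tokenizer.tokenize("Hello, world!")  # SentencePiece sub-tokens
# ids = tokenizer.build_inputs_with_special_tokens(tokenizer.convert_tokens_to_ids(pieces))
# print(pieces, ids)  # the id sequence ends with <sep> and <cls>, as built above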
| 34
| 0
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( TokenizerTesterMixin , unittest.TestCase):
'''simple docstring'''
A: Union[str, Any] = GPTSanJapaneseTokenizer
A: Tuple = False
A: Any = {"do_clean_text": False, "add_prefix_space": False}
def UpperCAmelCase__ ( self : Any ) -> str:
'''simple docstring'''
super().setUp()
# fmt: off
UpperCamelCase__ : int = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
# fmt: on
UpperCamelCase__ : Tuple = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀
UpperCamelCase__ : Optional[int] = {'''unk_token''': '''<unk>'''}
UpperCamelCase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.emoji_file , '''w''' ) as emoji_writer:
emoji_writer.write(json.dumps(lowerCamelCase__ ) )
def UpperCAmelCase__ ( self : Optional[int] , **lowerCamelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Dict ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : int = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
UpperCamelCase__ : List[Any] = '''こんにちは、世界。 \nこんばんは、世界。😀'''
return input_text, output_text
def UpperCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = self.get_input_output_texts(lowerCamelCase__ )
UpperCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
UpperCamelCase__ : str = tokenizer.decode(lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ )
return text, ids
def UpperCAmelCase__ ( self : Any ) -> List[str]:
'''simple docstring'''
pass # TODO add if relevant
def UpperCAmelCase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
pass # TODO add if relevant
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
pass # TODO add if relevant
def UpperCAmelCase__ ( self : Any ) -> Any:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = self.get_tokenizer()
# Testing tokenization
UpperCamelCase__ : List[Any] = '''こんにちは、世界。 こんばんは、㔺界。'''
UpperCamelCase__ : Optional[Any] = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
UpperCamelCase__ : int = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Testing conversion to ids without special tokens
UpperCamelCase__ : List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
UpperCamelCase__ : Any = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Testing conversion to ids with special tokens
UpperCamelCase__ : str = tokens + [tokenizer.unk_token]
UpperCamelCase__ : Tuple = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
UpperCamelCase__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = self.get_tokenizer()
# Testing tokenization
UpperCamelCase__ : Optional[int] = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
UpperCamelCase__ : Optional[Any] = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
UpperCamelCase__ : Tuple = tokenizer.encode(lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = tokenizer.decode(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
@slow
def UpperCAmelCase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : Dict = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
UpperCamelCase__ : str = '''こんにちは、世界。'''
UpperCamelCase__ : Optional[int] = '''こんばんは、㔺界。😀'''
UpperCamelCase__ : Optional[int] = '''こんにちは、世界。こんばんは、世界。😀'''
UpperCamelCase__ : List[str] = tokenizer.encode(prefix_text + input_text )
UpperCamelCase__ : List[str] = tokenizer.encode('''''' , prefix_text=prefix_text + input_text )
UpperCamelCase__ : str = tokenizer.encode(lowerCamelCase__ , prefix_text=lowerCamelCase__ )
UpperCamelCase__ : Tuple = tokenizer.decode(lowerCamelCase__ )
UpperCamelCase__ : List[str] = tokenizer.decode(lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = tokenizer.decode(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
@slow
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
UpperCamelCase__ : Optional[int] = '''こんにちは、世界。'''
UpperCamelCase__ : Dict = '''こんばんは、㔺界。😀'''
UpperCamelCase__ : Optional[int] = len(tokenizer.encode(lowerCamelCase__ ) ) - 2
UpperCamelCase__ : int = len(tokenizer.encode(lowerCamelCase__ ) ) - 2
UpperCamelCase__ : Optional[Any] = [1] + [0] * (len_prefix + len_text + 1)
UpperCamelCase__ : Tuple = [1] * (len_prefix + len_text + 1) + [0]
UpperCamelCase__ : int = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
UpperCamelCase__ : Tuple = tokenizer(prefix_text + input_text ).token_type_ids
UpperCamelCase__ : Tuple = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
UpperCamelCase__ : List[str] = tokenizer(lowerCamelCase__ , prefix_text=lowerCamelCase__ ).token_type_ids
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : Tuple = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
UpperCamelCase__ : str = tokenizer.encode('''あンいワ''' )
UpperCamelCase__ : Any = tokenizer.encode('''''' , prefix_text='''あンいワ''' )
UpperCamelCase__ : List[str] = tokenizer.encode('''いワ''' , prefix_text='''あン''' )
self.assertEqual(tokenizer.decode(lowerCamelCase__ ) , tokenizer.decode(lowerCamelCase__ ) )
self.assertEqual(tokenizer.decode(lowerCamelCase__ ) , tokenizer.decode(lowerCamelCase__ ) )
self.assertNotEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertNotEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
UpperCamelCase__ : Any = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
UpperCamelCase__ : Union[str, Any] = tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ )
UpperCamelCase__ : Any = tokenizer.batch_encode_plus(lowerCamelCase__ , padding=lowerCamelCase__ )
# fmt: off
UpperCamelCase__ : Union[str, Any] = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
UpperCamelCase__ : str = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
UpperCamelCase__ : List[str] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , lowerCamelCase__ )
self.assertListEqual(x_token.token_type_ids , lowerCamelCase__ )
self.assertListEqual(x_token.attention_mask , lowerCamelCase__ )
self.assertListEqual(x_token_a.input_ids , lowerCamelCase__ )
self.assertListEqual(x_token_a.token_type_ids , lowerCamelCase__ )
self.assertListEqual(x_token_a.attention_mask , lowerCamelCase__ )
def UpperCAmelCase__ ( self : int ) -> Any:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
pass
| 715
|
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest ( nn.Module):
def __init__( self : Dict ) -> str:
'''simple docstring'''
super().__init__()
UpperCamelCase__ : int = nn.Linear(3 , 4 )
UpperCamelCase__ : str = nn.BatchNormad(4 )
UpperCamelCase__ : List[str] = nn.Linear(4 , 5 )
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : Dict ) -> Tuple:
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase__ ) ) )
class PreForwardHook ( ModelHook):
def UpperCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Any , *lowerCamelCase__ : int , **lowerCamelCase__ : List[str] ) -> Any:
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook ( ModelHook):
def UpperCAmelCase__ ( self : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Any ) -> Any:
'''simple docstring'''
return output + 1
class __magic_name__ ( unittest.TestCase):
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : Tuple = ModelForTest()
UpperCamelCase__ : Union[str, Any] = ModelHook()
add_hook_to_module(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(test_model._hf_hook , lowerCamelCase__ )
self.assertTrue(hasattr(lowerCamelCase__ , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(lowerCamelCase__ )
self.assertFalse(hasattr(lowerCamelCase__ , '''_hf_hook''' ) )
self.assertFalse(hasattr(lowerCamelCase__ , '''_old_forward''' ) )
def UpperCAmelCase__ ( self : Dict ) -> str:
'''simple docstring'''
UpperCamelCase__ : Dict = ModelForTest()
UpperCamelCase__ : List[str] = ModelHook()
add_hook_to_module(lowerCamelCase__ , lowerCamelCase__ )
add_hook_to_module(lowerCamelCase__ , lowerCamelCase__ , append=lowerCamelCase__ )
self.assertEqual(isinstance(test_model._hf_hook , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(lowerCamelCase__ , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(lowerCamelCase__ )
self.assertFalse(hasattr(lowerCamelCase__ , '''_hf_hook''' ) )
self.assertFalse(hasattr(lowerCamelCase__ , '''_old_forward''' ) )
def UpperCAmelCase__ ( self : List[str] ) -> int:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = ModelForTest()
UpperCamelCase__ : List[str] = torch.randn(2 , 3 )
UpperCamelCase__ : int = test_model(x + 1 )
UpperCamelCase__ : List[Any] = test_model(x + 2 )
UpperCamelCase__ : Optional[int] = PreForwardHook()
add_hook_to_module(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : List[Any] = test_model(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
UpperCamelCase__ : Optional[Any] = PreForwardHook()
add_hook_to_module(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : List[str] = test_model(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
UpperCamelCase__ : Tuple = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : Any = test_model(lowerCamelCase__ )
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-5 )
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : Dict = ModelForTest()
UpperCamelCase__ : str = torch.randn(2 , 3 )
UpperCamelCase__ : Any = test_model(lowerCamelCase__ )
UpperCamelCase__ : int = PostForwardHook()
add_hook_to_module(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : Dict = test_model(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
UpperCamelCase__ : Dict = PostForwardHook()
add_hook_to_module(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : int = test_model(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
UpperCamelCase__ : List[Any] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : Tuple = test_model(lowerCamelCase__ )
assert torch.allclose(lowerCamelCase__ , output + 2 , atol=1E-5 )
def UpperCAmelCase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : List[Any] = ModelForTest()
UpperCamelCase__ : Tuple = torch.randn(2 , 3 )
UpperCamelCase__ : Tuple = test_model(lowerCamelCase__ )
UpperCamelCase__ : Union[str, Any] = PostForwardHook()
add_hook_to_module(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : Any = test_model(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
UpperCamelCase__ : Optional[int] = True
UpperCamelCase__ : Optional[int] = test_model(lowerCamelCase__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def UpperCAmelCase__ ( self : Any ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
UpperCamelCase__ : Tuple = torch.randn(2 , 3 )
UpperCamelCase__ : str = model(lowerCamelCase__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(lowerCamelCase__ , AlignDevicesHook(io_same_device=lowerCamelCase__ ) )
UpperCamelCase__ : Tuple = torch.randn(2 , 3 ).to(0 )
UpperCamelCase__ : Optional[int] = model(lowerCamelCase__ )
self.assertEqual(output.device , torch.device(0 ) )
def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : Dict = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
UpperCamelCase__ : int = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCamelCase__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCamelCase__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCamelCase__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
UpperCamelCase__ : int = torch.device(hook_kwargs['''execution_device'''] )
self.assertEqual(model.batchnorm.running_mean.device , lowerCamelCase__ )
UpperCamelCase__ : Dict = torch.randn(2 , 3 )
UpperCamelCase__ : List[str] = model(lowerCamelCase__ )
self.assertEqual(output.device , lowerCamelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
UpperCamelCase__ : Optional[Any] = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCamelCase__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCamelCase__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCamelCase__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
UpperCamelCase__ : List[Any] = torch.randn(2 , 3 )
UpperCamelCase__ : str = model(lowerCamelCase__ )
self.assertEqual(output.device , lowerCamelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
UpperCamelCase__ : Any = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(lowerCamelCase__ , execution_device=lowerCamelCase__ , offload=lowerCamelCase__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
UpperCamelCase__ : str = torch.device(lowerCamelCase__ )
self.assertEqual(model.batchnorm.running_mean.device , lowerCamelCase__ )
UpperCamelCase__ : int = torch.randn(2 , 3 )
UpperCamelCase__ : Tuple = model(lowerCamelCase__ )
self.assertEqual(output.device , lowerCamelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCamelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(lowerCamelCase__ , execution_device=lowerCamelCase__ , offload=lowerCamelCase__ , offload_buffers=lowerCamelCase__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
UpperCamelCase__ : int = torch.randn(2 , 3 )
UpperCamelCase__ : str = model(lowerCamelCase__ )
self.assertEqual(output.device , lowerCamelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCamelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def UpperCAmelCase__ ( self : int ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : List[str] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
UpperCamelCase__ : List[str] = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
lowerCamelCase__ , execution_device=lowerCamelCase__ , offload=lowerCamelCase__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
UpperCamelCase__ : List[str] = torch.device(lowerCamelCase__ )
self.assertEqual(model.batchnorm.running_mean.device , lowerCamelCase__ )
UpperCamelCase__ : List[str] = torch.randn(2 , 3 )
UpperCamelCase__ : Any = model(lowerCamelCase__ )
self.assertEqual(output.device , lowerCamelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCamelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
lowerCamelCase__ , execution_device=lowerCamelCase__ , offload=lowerCamelCase__ , weights_map=model.state_dict() , offload_buffers=lowerCamelCase__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
UpperCamelCase__ : Union[str, Any] = torch.randn(2 , 3 )
UpperCamelCase__ : Optional[int] = model(lowerCamelCase__ )
self.assertEqual(output.device , lowerCamelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCamelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
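# --- Added standalone sketch (assumes accelerate and torch are installed): attaching an
# --- AlignDevicesHook that offloads a module's weights, mirroring what the tests above
# --- exercise. The module and device choice below are illustrative only.
# import torch.nn as nn
# from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_module
# layer = nn.Linear(3, 4)
# add_hook_to_module(layer, AlignDevicesHook(execution_device="cpu", offload=True))
# print(layer.weight.device)  # weights sit on the meta device until the forward pass
# remove_hook_from_module(layer)  # restores the original weights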
| 106
| 0
|
"""simple docstring"""
from math import factorial
def binomial_distribution( successes , trials , prob ) -> float:
if successes > trials:
raise ValueError("""successes must be lower or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
if not isinstance(successes , int ) or not isinstance(trials , int ):
raise ValueError("""the function is defined for non-negative integers""" )
if not 0 < prob < 1:
raise ValueError("""prob has to be in range of 1 - 0""" )
probability = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
coefficient = float(factorial(trials ) )
coefficient /= factorial(successes ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("""Probability of 2 successes out of 4 trails""")
print("""with probability of 0.75 is:""", end=""" """)
print(binomial_distribution(2, 4, 0.7_5))
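# Added sanity check (not in the original script): for 2 successes in 4 trials with
# p = 0.75, the closed form is C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375,
# so the value printed above should be 0.2109375.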
| 543
|
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_A = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class MaskGenerationPipeline (ChunkPipeline ):
'''simple docstring'''
def __init__( self : Tuple , **_snake_case : Dict ) -> List[str]:
super().__init__(**_snake_case )
requires_backends(self , "vision" )
requires_backends(self , "torch" )
if self.framework != "pt":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
self.check_model_type(_snake_case )
def lowerCAmelCase_ ( self : int , **_snake_case : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = {}
# preprocess args
if "points_per_batch" in kwargs:
SCREAMING_SNAKE_CASE__ = kwargs["points_per_batch"]
if "points_per_crop" in kwargs:
SCREAMING_SNAKE_CASE__ = kwargs["points_per_crop"]
if "crops_n_layers" in kwargs:
SCREAMING_SNAKE_CASE__ = kwargs["crops_n_layers"]
if "crop_overlap_ratio" in kwargs:
SCREAMING_SNAKE_CASE__ = kwargs["crop_overlap_ratio"]
if "crop_n_points_downscale_factor" in kwargs:
SCREAMING_SNAKE_CASE__ = kwargs["crop_n_points_downscale_factor"]
# postprocess args
if "pred_iou_thresh" in kwargs:
SCREAMING_SNAKE_CASE__ = kwargs["pred_iou_thresh"]
if "stability_score_offset" in kwargs:
SCREAMING_SNAKE_CASE__ = kwargs["stability_score_offset"]
if "mask_threshold" in kwargs:
SCREAMING_SNAKE_CASE__ = kwargs["mask_threshold"]
if "stability_score_thresh" in kwargs:
SCREAMING_SNAKE_CASE__ = kwargs["stability_score_thresh"]
if "crops_nms_thresh" in kwargs:
SCREAMING_SNAKE_CASE__ = kwargs["crops_nms_thresh"]
if "output_rle_mask" in kwargs:
SCREAMING_SNAKE_CASE__ = kwargs["output_rle_mask"]
if "output_bboxes_mask" in kwargs:
SCREAMING_SNAKE_CASE__ = kwargs["output_bboxes_mask"]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : int , _snake_case : List[str] , *_snake_case : int , _snake_case : Union[str, Any]=None , _snake_case : List[str]=None , **_snake_case : Tuple ) -> str:
return super().__call__(_snake_case , *_snake_case , num_workers=_snake_case , batch_size=_snake_case , **_snake_case )
def lowerCAmelCase_ ( self : List[Any] , _snake_case : int , _snake_case : List[Any]=64 , _snake_case : int = 0 , _snake_case : float = 512 / 1500 , _snake_case : Optional[int] = 32 , _snake_case : Optional[int] = 1 , ) -> int:
SCREAMING_SNAKE_CASE__ = load_image(_snake_case )
SCREAMING_SNAKE_CASE__ = self.image_processor.size["longest_edge"]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.image_processor.generate_crop_boxes(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
SCREAMING_SNAKE_CASE__ = self.image_processor(images=_snake_case , return_tensors="pt" )
with self.device_placement():
if self.framework == "pt":
SCREAMING_SNAKE_CASE__ = self.get_inference_context()
with inference_context():
SCREAMING_SNAKE_CASE__ = self._ensure_tensor_on_device(_snake_case , device=self.device )
SCREAMING_SNAKE_CASE__ = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) )
SCREAMING_SNAKE_CASE__ = image_embeddings
SCREAMING_SNAKE_CASE__ = grid_points.shape[1]
SCREAMING_SNAKE_CASE__ = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
"To return all points at once, set points_per_batch to None" )
for i in range(0 , _snake_case , _snake_case ):
SCREAMING_SNAKE_CASE__ = grid_points[:, i : i + points_per_batch, :, :]
SCREAMING_SNAKE_CASE__ = input_labels[:, i : i + points_per_batch]
SCREAMING_SNAKE_CASE__ = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def lowerCAmelCase_ ( self : Tuple , _snake_case : Optional[int] , _snake_case : Dict=0.88 , _snake_case : List[Any]=0.95 , _snake_case : List[Any]=0 , _snake_case : List[str]=1 , ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = model_inputs.pop("input_boxes" )
SCREAMING_SNAKE_CASE__ = model_inputs.pop("is_last" )
SCREAMING_SNAKE_CASE__ = model_inputs.pop("original_sizes" ).tolist()
SCREAMING_SNAKE_CASE__ = model_inputs.pop("reshaped_input_sizes" ).tolist()
SCREAMING_SNAKE_CASE__ = self.model(**_snake_case )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
SCREAMING_SNAKE_CASE__ = model_outputs["pred_masks"]
SCREAMING_SNAKE_CASE__ = self.image_processor.post_process_masks(
_snake_case , _snake_case , _snake_case , _snake_case , binarize=_snake_case )
SCREAMING_SNAKE_CASE__ = model_outputs["iou_scores"]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , _snake_case , _snake_case , _snake_case , _snake_case , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def lowerCAmelCase_ ( self : List[str] , _snake_case : Union[str, Any] , _snake_case : Any=False , _snake_case : str=False , _snake_case : List[str]=0.7 , ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
for model_output in model_outputs:
all_scores.append(model_output.pop("iou_scores" ) )
all_masks.extend(model_output.pop("masks" ) )
all_boxes.append(model_output.pop("boxes" ) )
SCREAMING_SNAKE_CASE__ = torch.cat(_snake_case )
SCREAMING_SNAKE_CASE__ = torch.cat(_snake_case )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.image_processor.post_process_for_mask_generation(
_snake_case , _snake_case , _snake_case , _snake_case )
SCREAMING_SNAKE_CASE__ = defaultdict(_snake_case )
for output in model_outputs:
for k, v in output.items():
extra[k].append(_snake_case )
SCREAMING_SNAKE_CASE__ = {}
if output_rle_mask:
SCREAMING_SNAKE_CASE__ = rle_mask
if output_bboxes_mask:
SCREAMING_SNAKE_CASE__ = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 159
| 0
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowercase_ = """src/diffusers"""
lowercase_ = """."""
# This is to make sure the diffusers module imported is the one in the repo.
lowercase_ = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowercase_ = spec.loader.load_module()
def _should_continue (line , indent ) -> bool:
return line.startswith(indent ) or len(line ) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$' , line ) is not None
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]:
lowercase__ = object_name.split('.' )
lowercase__ = 0
# First let's find the module where our object lives.
lowercase__ = parts[i]
while i < len(__SCREAMING_SNAKE_CASE ) and not os.path.isfile(os.path.join(__SCREAMING_SNAKE_CASE , F"""{module}.py""" ) ):
i += 1
if i < len(__SCREAMING_SNAKE_CASE ):
lowercase__ = os.path.join(__SCREAMING_SNAKE_CASE , parts[i] )
if i >= len(__SCREAMING_SNAKE_CASE ):
raise ValueError(F"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(__SCREAMING_SNAKE_CASE , F"""{module}.py""" ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowercase__ = f.readlines()
# Now let's find the class / func in the code!
lowercase__ = ""
lowercase__ = 0
for name in parts[i + 1 :]:
while (
line_index < len(__SCREAMING_SNAKE_CASE ) and re.search(RF"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(__SCREAMING_SNAKE_CASE ):
raise ValueError(F""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
lowercase__ = line_index
while line_index < len(__SCREAMING_SNAKE_CASE ) and _should_continue(lines[line_index] , __SCREAMING_SNAKE_CASE ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
lowercase__ = lines[start_index:line_index]
return "".join(__SCREAMING_SNAKE_CASE )
lowercase_ = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
lowercase_ = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""")
lowercase_ = re.compile(R"""<FILL\s+[^>]*>""")
def get_indent (code ) -> str:
lines = code.split('\n' )
idx = 0
while idx < len(lines ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(lines ):
return re.search(R'^(\s*)\S' , lines[idx] ).groups()[0]
return ""
def blackify (code ) -> str:
has_indent = len(get_indent(code ) ) > 0
if has_indent:
code = F"""class Bla:\n{code}"""
mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=True )
result = black.format_str(code , mode=mode )
result , _ = style_docstrings_in_code(result )
return result[len('class Bla:\n' ) :] if has_indent else result
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any:
with open(__SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowercase__ = f.readlines()
lowercase__ = []
lowercase__ = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(__SCREAMING_SNAKE_CASE ):
lowercase__ = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
lowercase__ = search.groups()
lowercase__ = find_code_in_diffusers(__SCREAMING_SNAKE_CASE )
lowercase__ = get_indent(__SCREAMING_SNAKE_CASE )
lowercase__ = line_index + 1 if indent == theoretical_indent else line_index + 2
lowercase__ = theoretical_indent
lowercase__ = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
lowercase__ = True
while line_index < len(__SCREAMING_SNAKE_CASE ) and should_continue:
line_index += 1
if line_index >= len(__SCREAMING_SNAKE_CASE ):
break
lowercase__ = lines[line_index]
lowercase__ = _should_continue(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and re.search(F"""^{indent}# End copy""" , __SCREAMING_SNAKE_CASE ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
lowercase__ = lines[start_index:line_index]
lowercase__ = "".join(__SCREAMING_SNAKE_CASE )
# Remove any nested `Copied from` comments to avoid circular copies
lowercase__ = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(__SCREAMING_SNAKE_CASE ) is None]
lowercase__ = "\n".join(__SCREAMING_SNAKE_CASE )
# Before comparing, use the `replace_pattern` on the original code.
if len(__SCREAMING_SNAKE_CASE ) > 0:
lowercase__ = replace_pattern.replace('with' , '' ).split(',' )
lowercase__ = [_re_replace_pattern.search(__SCREAMING_SNAKE_CASE ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
lowercase__ = pattern.groups()
lowercase__ = re.sub(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if option.strip() == "all-casing":
lowercase__ = re.sub(obja.lower() , obja.lower() , __SCREAMING_SNAKE_CASE )
lowercase__ = re.sub(obja.upper() , obja.upper() , __SCREAMING_SNAKE_CASE )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
lowercase__ = blackify(lines[start_index - 1] + theoretical_code )
lowercase__ = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
lowercase__ = lines[:start_index] + [theoretical_code] + lines[line_index:]
lowercase__ = start_index + 1
if overwrite and len(__SCREAMING_SNAKE_CASE ) > 0:
# Warn the user a file has been modified.
print(F"""Detected changes, rewriting {filename}.""" )
with open(__SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(__SCREAMING_SNAKE_CASE )
return diffs
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = False ) -> Any:
lowercase__ = glob.glob(os.path.join(__SCREAMING_SNAKE_CASE , '**/*.py' ) , recursive=__SCREAMING_SNAKE_CASE )
lowercase__ = []
for filename in all_files:
lowercase__ = is_copy_consistent(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
diffs += [F"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(__SCREAMING_SNAKE_CASE ) > 0:
lowercase__ = "\n".join(__SCREAMING_SNAKE_CASE )
raise Exception(
'Found the following copy inconsistencies:\n'
+ diff
+ '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowercase_ = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 703
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""facebook/deit-base-distilled-patch16-224""": (
"""https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig (PretrainedConfig ):
model_type = 'deit'
def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , )-> None:
"""simple docstring"""
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.encoder_stride = encoder_stride
class DeiTOnnxConfig (OnnxConfig ):
torch_onnx_minimum_version = version.parse('1.11' )
@property
def inputs( self )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def atol_for_validation( self )-> float:
"""simple docstring"""
return 1E-4
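# --- Added usage sketch: instantiating the config defined above with its defaults and
# --- overriding one field; the override value is illustrative only.
# config = DeiTConfig(image_size=384)
# print(config.image_size, config.hidden_size, config.num_hidden_layers)  # 384 768 12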
| 45
| 0
|
"""simple docstring"""
import base64
# Assumption: the intended codec here is base64 (b64encode / b64decode).
def base64_encode( string : str ):
return base64.b64encode(string.encode('utf-8' ) )
def base64_decode( encoded : bytes ):
return base64.b64decode(encoded ).decode('utf-8' )
if __name__ == "__main__":
test = '''Hello World!'''
encoded = base64_encode(test)
print(encoded)
decoded = base64_decode(encoded)
print(decoded)
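# Expected output under the base64 assumption above (added note, not in the original):
# b'SGVsbG8gV29ybGQh'
# Hello World!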
| 91
|
'''simple docstring'''
from __future__ import annotations
def is_palindrome( n : int | str ) -> bool:
n = str(n )
return n == n[::-1]
def solution( limit : int = 100_0000 ) -> int:
total = 0
for i in range(1 , limit ):
if is_palindrome(i ) and is_palindrome(bin(i ).split('b' )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
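# Added note: solution() sums numbers below the limit that are palindromic in both
# base 10 and base 2 (Project Euler 36); with the default limit of 1_000_000 the
# expected result is 872187.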
| 447
| 0
|
'''simple docstring'''
__lowerCamelCase : Any = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 710
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __UpperCAmelCase ( __magic_name__=None )-> List[str]:
"""simple docstring"""
if subparsers is not None:
snake_case_ : List[str] = subparsers.add_parser("test" )
else:
snake_case_ : List[Any] = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" ,default=__magic_name__ ,help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) ,)
if subparsers is not None:
parser.set_defaults(func=__magic_name__ )
return parser
def __UpperCAmelCase ( __magic_name__ )-> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
snake_case_ : str = script_name
else:
snake_case_ : Any = F'''--config_file={args.config_file} {script_name}'''
snake_case_ : Union[str, Any] = ["accelerate-launch"] + test_args.split()
snake_case_ : Optional[int] = execute_subprocess_async(__magic_name__ ,env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Dict = test_command_parser()
snake_case_ : Dict = parser.parse_args()
test_command(__magic_name__ )
if __name__ == "__main__":
main()
| 656
| 0
|
"""simple docstring"""
import math
import qiskit
def quantum_full_adder (input_1 : int = 1 , input_2 : int = 1 , carry_in : int = 1 ):
"""simple docstring"""
if (
isinstance(input_1 , str )
or isinstance(input_2 , str )
or isinstance(carry_in , str )
):
raise TypeError("""inputs must be integers.""" )
if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
raise ValueError("""inputs must be positive.""" )
if (
(math.floor(input_1 ) != input_1)
or (math.floor(input_2 ) != input_2)
or (math.floor(carry_in ) != carry_in)
):
raise ValueError("""inputs must be exact integers.""" )
if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
raise ValueError("""inputs must be less or equal to 2.""" )
# build registers
qr = qiskit.QuantumRegister(4 , """qr""" )
cr = qiskit.ClassicalRegister(2 , """cr""" )
# list the entries
entry = [input_1, input_2, carry_in]
quantum_circuit = qiskit.QuantumCircuit(qr , cr )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(i ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(i ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(i ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , cr ) # measure the last two qbits
backend = qiskit.Aer.get_backend("""aer_simulator""" )
job = qiskit.execute(quantum_circuit , backend , shots=10_00 )
return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(F'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
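# Added illustration (assumes qiskit with the Aer simulator is installed): for the basis
# inputs (1, 1, 1) the adder is deterministic, so the returned counts should be
# concentrated on the state '11' (sum bit = 1, carry-out = 1), e.g. {'11': 1000}.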
| 609
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A_ = logging.get_logger(__name__)
class lowercase( BaseImageProcessor ):
'''simple docstring'''
lowercase__ = ["pixel_values"]
def __init__( self: List[Any], a_: bool = True, a_: Optional[Dict[str, int]] = None, a_: PILImageResampling = PILImageResampling.BICUBIC, a_: bool = True, a_: bool = True, a_: Union[int, float] = 1 / 255, a_: Dict[str, int] = None, a_: bool = True, a_: Optional[Union[float, List[float]]] = None, a_: Optional[Union[float, List[float]]] = None, **a_: Tuple, ):
'''simple docstring'''
super().__init__(**a_ )
_snake_case : Optional[Any] = size if size is not None else {"""height""": 224, """width""": 224}
_snake_case : str = get_size_dict(a_ )
_snake_case : Optional[int] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_snake_case : Union[str, Any] = get_size_dict(a_, default_to_square=a_, param_name="""crop_size""" )
_snake_case : Union[str, Any] = do_resize
_snake_case : Union[str, Any] = do_rescale
_snake_case : List[str] = do_normalize
_snake_case : int = do_center_crop
_snake_case : str = crop_size
_snake_case : Tuple = size
_snake_case : Any = resample
_snake_case : Tuple = rescale_factor
_snake_case : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_snake_case : Tuple = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase_ ( self: int, a_: np.ndarray, a_: Dict[str, int], a_: PILImageResampling = PILImageResampling.BILINEAR, a_: Optional[Union[str, ChannelDimension]] = None, **a_: str, ):
'''simple docstring'''
_snake_case : Optional[Any] = get_size_dict(a_ )
if "shortest_edge" in size:
_snake_case : Dict = get_resize_output_image_size(a_, size=size["""shortest_edge"""], default_to_square=a_ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_snake_case : int = (size["""height"""], size["""width"""])
else:
raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
return resize(a_, size=a_, resample=a_, data_format=a_, **a_ )
def UpperCamelCase_ ( self: List[Any], a_: np.ndarray, a_: Dict[str, int], a_: Optional[Union[str, ChannelDimension]] = None, **a_: Optional[int], ):
'''simple docstring'''
_snake_case : List[str] = get_size_dict(a_ )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(a_, size=(size["""height"""], size["""width"""]), data_format=a_, **a_ )
def UpperCamelCase_ ( self: Optional[int], a_: np.ndarray, a_: float, a_: Optional[Union[str, ChannelDimension]] = None, **a_: int ):
'''simple docstring'''
return rescale(a_, scale=a_, data_format=a_, **a_ )
def UpperCamelCase_ ( self: Dict, a_: np.ndarray, a_: Union[float, List[float]], a_: Union[float, List[float]], a_: Optional[Union[str, ChannelDimension]] = None, **a_: int, ):
'''simple docstring'''
return normalize(a_, mean=a_, std=a_, data_format=a_, **a_ )
def UpperCamelCase_ ( self: Optional[Any], a_: ImageInput, a_: Optional[bool] = None, a_: Dict[str, int] = None, a_: PILImageResampling = None, a_: bool = None, a_: int = None, a_: Optional[bool] = None, a_: Optional[float] = None, a_: Optional[bool] = None, a_: Optional[Union[float, List[float]]] = None, a_: Optional[Union[float, List[float]]] = None, a_: Optional[Union[str, TensorType]] = None, a_: Union[str, ChannelDimension] = ChannelDimension.FIRST, **a_: List[Any], ):
'''simple docstring'''
_snake_case : Optional[Any] = do_resize if do_resize is not None else self.do_resize
_snake_case : str = do_rescale if do_rescale is not None else self.do_rescale
_snake_case : int = do_normalize if do_normalize is not None else self.do_normalize
_snake_case : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
_snake_case : Dict = crop_size if crop_size is not None else self.crop_size
_snake_case : int = get_size_dict(a_, param_name="""crop_size""", default_to_square=a_ )
_snake_case : Any = resample if resample is not None else self.resample
_snake_case : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
_snake_case : Tuple = image_mean if image_mean is not None else self.image_mean
_snake_case : Any = image_std if image_std is not None else self.image_std
_snake_case : Optional[int] = size if size is not None else self.size
_snake_case : List[Any] = get_size_dict(a_ )
if not is_batched(a_ ):
_snake_case : Tuple = [images]
if not valid_images(a_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
_snake_case : str = [to_numpy_array(a_ ) for image in images]
if do_resize:
_snake_case : int = [self.resize(image=a_, size=a_, resample=a_ ) for image in images]
if do_center_crop:
_snake_case : Optional[Any] = [self.center_crop(image=a_, size=a_ ) for image in images]
if do_rescale:
_snake_case : Any = [self.rescale(image=a_, scale=a_ ) for image in images]
if do_normalize:
_snake_case : Any = [self.normalize(image=a_, mean=a_, std=a_ ) for image in images]
_snake_case : Optional[Any] = [to_channel_dimension_format(a_, a_ ) for image in images]
_snake_case : Optional[Any] = {"""pixel_values""": images}
return BatchFeature(data=a_, tensor_type=a_ )
| 609
| 1
|
def power( base: int , exponent: int ):
    return base * power(base , (exponent - 1) ) if exponent else 1
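# Hedged sanity checks for the recursive helper above; the exponent must be a
# non-negative integer here, negative exponents are handled by the caller below.
def _power_examples() -> None:
    assert power(2, 10) == 1024
    assert power(5, 0) == 1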
if __name__ == "__main__":
print('''Raise base to the power of exponent using recursion...''')
    base = int(input('''Enter the base: ''').strip())
    exponent = int(input('''Enter the exponent: ''').strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
print(f"""{base} to the power of {exponent} is {result}""")
| 707
|
from math import ceil, sqrt
def solution( limit: int = 1_0_0_0_0_0_0 ):
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
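# A brute-force cross-check for small tile budgets (a sketch added for clarity;
# per the problem statement, forty-one laminae can be formed from up to one
# hundred tiles, so _brute_force(100) and solution(100) should both return 41).
def _brute_force(limit: int = 100) -> int:
    count = 0
    for outer_width in range(3, (limit // 4) + 2):
        for hole_width in range(outer_width - 2, 0, -2):
            if outer_width**2 - hole_width**2 <= limit:
                count += 1
    return count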
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657
| 0
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = '''▁'''
snake_case = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
snake_case = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
snake_case = {
'''facebook/s2t-small-librispeech-asr''': 1024,
}
snake_case = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
snake_case = {'''mustc''': MUSTC_LANGS}
class SCREAMING_SNAKE_CASE ( __a ):
"""simple docstring"""
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = MAX_MODEL_INPUT_SIZES
__A = ["input_ids", "attention_mask"]
__A = []
def __init__( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : List[Any]="<s>" , __lowerCAmelCase : List[str]="</s>" , __lowerCAmelCase : Optional[Any]="<pad>" , __lowerCAmelCase : Optional[Any]="<unk>" , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Union[str, Any] , ):
"""simple docstring"""
_lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , do_upper_case=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , lang_codes=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
_lowerCAmelCase = do_upper_case
_lowerCAmelCase = do_lower_case
_lowerCAmelCase = load_json(__lowerCAmelCase )
_lowerCAmelCase = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase = spm_file
_lowerCAmelCase = load_spm(__lowerCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
_lowerCAmelCase = lang_codes
_lowerCAmelCase = LANGUAGES[lang_codes]
_lowerCAmelCase = [F"<lang:{lang}>" for lang in self.langs]
_lowerCAmelCase = {lang: self.sp_model.PieceToId(F"<lang:{lang}>" ) for lang in self.langs}
_lowerCAmelCase = self.lang_tokens
_lowerCAmelCase = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
_lowerCAmelCase = {}
@property
def a ( self : List[Any] ):
"""simple docstring"""
return len(self.encoder )
@property
def a ( self : Any ):
"""simple docstring"""
return self._tgt_lang
@tgt_lang.setter
def a ( self : Dict , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCAmelCase = new_tgt_lang
self.set_tgt_lang_special_tokens(__lowerCAmelCase )
def a ( self : str , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCAmelCase = self.lang_code_to_id[tgt_lang]
_lowerCAmelCase = [lang_code_id]
def a ( self : str , __lowerCAmelCase : str ):
"""simple docstring"""
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def a ( self : Dict , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
return self.encoder.get(__lowerCAmelCase , self.encoder[self.unk_token] )
def a ( self : int , __lowerCAmelCase : int ):
"""simple docstring"""
return self.decoder.get(__lowerCAmelCase , self.unk_token )
def a ( self : Optional[Any] , __lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
_lowerCAmelCase = self.sp_model.decode(__lowerCAmelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
_lowerCAmelCase = []
else:
current_sub_tokens.append(__lowerCAmelCase )
_lowerCAmelCase = self.sp_model.decode(__lowerCAmelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def a ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : int=None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def a ( self : str , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
_lowerCAmelCase = [1] * len(self.prefix_tokens )
_lowerCAmelCase = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(__lowerCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(__lowerCAmelCase )) + ([0] * len(__lowerCAmelCase )) + suffix_ones
def a ( self : List[Any] ):
"""simple docstring"""
_lowerCAmelCase = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
"""simple docstring"""
_lowerCAmelCase = self.__dict__.copy()
_lowerCAmelCase = None
return state
def __setstate__( self : List[Any] , __lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_lowerCAmelCase = {}
_lowerCAmelCase = load_spm(self.spm_file , self.sp_model_kwargs )
def a ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ):
"""simple docstring"""
_lowerCAmelCase = Path(__lowerCAmelCase )
assert save_dir.is_dir(), F"{save_directory} should be a directory"
_lowerCAmelCase = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
_lowerCAmelCase = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , __lowerCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __lowerCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(__lowerCAmelCase , 'wb' ) as fi:
_lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (str(__lowerCAmelCase ), str(__lowerCAmelCase ))
def load_spm( path: str , sp_model_kwargs: Dict[str, Any] ):
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path: str ):
    with open(path , 'r' ) as f:
        return json.load(f )
def save_json( data: Any , path: str ):
    with open(path , 'w' ) as f:
        json.dump(data , f , indent=2 )
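# A small round-trip sketch for the JSON helpers above (assumes they carry the
# names load_json/save_json that the tokenizer's save_vocabulary path expects).
def _roundtrip_json_example() -> None:
    import tempfile
    payload = {"<s>": 0, "<pad>": 1, "</s>": 2}
    with tempfile.TemporaryDirectory() as tmp_dir:
        vocab_path = os.path.join(tmp_dir, "vocab.json")
        save_json(payload, vocab_path)
        assert load_json(vocab_path) == payload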
| 309
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu( vector: list[float] ):
    return np.maximum(0 , vector )
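# Because np.maximum broadcasts element-wise, relu works unchanged on 2-D inputs
# as well; a quick illustrative check (not part of the original module):
def _relu_matrix_example() -> None:
    result = relu(np.array([[-2.0, 3.0], [0.5, -1.0]]))
    assert np.array_equal(result, np.array([[0.0, 3.0], [0.5, 0.0]]))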
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 309
| 1
|
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score as fa_score
import datasets
__lowerCAmelCase = """\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
__lowerCAmelCase = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
__lowerCAmelCase = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def simple_accuracy( preds , labels ):
    return float((preds == labels).mean() )
def acc_and_fa( preds , labels ):
    acc = simple_accuracy(preds , labels )
    fa = float(fa_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def precision_at_aa( en_sentvecs , in_sentvecs ):
    en_sentvecs = np.array(en_sentvecs )
    in_sentvecs = np.array(in_sentvecs )
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs , axis=0 )
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs , axis=0 )
    sim = cdist(en_sentvecs , in_sentvecs , '''cosine''' )
    actual = np.array(range(n ) )
    preds = sim.argsort(axis=1 )[:, :10]
    matches = np.any(preds == actual[:, None] , axis=1 )
    return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def _lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
def _lowerCAmelCase ( self : Tuple , a : List[str] , a : List[str] ) -> Optional[int]:
"""simple docstring"""
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(a , a )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(a , a )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(a , a )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
| 720
|
"""simple docstring"""
def cocktail_shaker_sort( unsorted: list ):
    for i in range(len(unsorted ) - 1 , 0 , -1 ):
        swapped = False
        for j in range(i , 0 , -1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
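# Hedged property check (illustrative only): the result should agree with
# Python's built-in sorted() on a handful of random integer lists.
def _matches_builtin_sorted() -> None:
    import random
    for _ in range(10):
        data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
        assert cocktail_shaker_sort(list(data)) == sorted(data)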
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(f'''{cocktail_shaker_sort(unsorted) = }''')
| 396
| 0
|
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def __snake_case ( __A ,__A ,__A ,__A ,__A ) -> int:
# load base model
lowercase : int = StableDiffusionPipeline.from_pretrained(__A ,torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
lowercase : List[str] = load_file(__A )
lowercase : Optional[Any] = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
lowercase : List[str] = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
lowercase : List[str] = pipeline.text_encoder
else:
lowercase : Optional[int] = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
lowercase : Tuple = pipeline.unet
# find the target layer
lowercase : Dict = layer_infos.pop(0 )
while len(__A ) > -1:
try:
lowercase : int = curr_layer.__getattr__(__A )
if len(__A ) > 0:
lowercase : List[Any] = layer_infos.pop(0 )
elif len(__A ) == 0:
break
except Exception:
if len(__A ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
lowercase : List[Any] = layer_infos.pop(0 )
lowercase : Tuple = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""" ,"""lora_up""" ) )
pair_keys.append(__A )
else:
pair_keys.append(__A )
pair_keys.append(key.replace("""lora_up""" ,"""lora_down""" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
lowercase : List[Any] = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
lowercase : Tuple = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(__A ,__A ).unsqueeze(2 ).unsqueeze(3 )
else:
lowercase : Optional[Any] = state_dict[pair_keys[0]].to(torch.floataa )
lowercase : Any = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(__A ,__A )
# update visited list
for item in pair_keys:
visited.append(__A )
return pipeline
if __name__ == "__main__":
lowerCAmelCase: Optional[int] =argparse.ArgumentParser()
parser.add_argument(
"--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
)
parser.add_argument(
"--lora_prefix_text_encoder",
default="lora_te",
type=str,
help="The prefix of text encoder weight in safetensors",
)
parser.add_argument("--alpha", default=0.7_5, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
parser.add_argument(
"--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
)
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
lowerCAmelCase: int =parser.parse_args()
lowerCAmelCase: Any =args.base_model_path
lowerCAmelCase: Any =args.checkpoint_path
lowerCAmelCase: Optional[int] =args.dump_path
lowerCAmelCase: List[Any] =args.lora_prefix_unet
lowerCAmelCase: List[str] =args.lora_prefix_text_encoder
lowerCAmelCase: Union[str, Any] =args.alpha
lowerCAmelCase: Optional[Any] =convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
lowerCAmelCase: List[str] =pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
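# Example invocation (a sketch with placeholder paths and an assumed script name;
# the flags are exactly those defined by the argparse block above):
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path ./stable-diffusion-base \
#       --checkpoint_path ./lora_weights.safetensors \
#       --dump_path ./merged_pipeline \
#       --alpha 0.75 --device cpu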
| 607
|
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def __snake_case ( __A ,__A = "cpu" ,__A = None ) -> None:
lowercase : Optional[int] = torch.load(__A ,map_location=__A )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__A ,torch.Tensor ):
raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
lowercase : List[str] = v.half()
if save_path is None: # overwrite src_path
lowercase : List[str] = src_path
torch.save(__A ,__A )
if __name__ == "__main__":
fire.Fire(convert)
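# Usage sketch (assumes the function above keeps the name `convert`, which the
# python-fire entry point relies on); file names below are placeholders:
#   convert("pytorch_model.bin", save_path="pytorch_model.fp16.bin")
# or from the shell:
#   python fp16_conversion.py pytorch_model.bin --save_path pytorch_model.fp16.bin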
| 607
| 1
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path: str , big_bird_config_file: str , pytorch_dump_path: str , is_trivia_qa: bool ) -> None:
    config = BigBirdConfig.from_json_file(big_bird_config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config )
    else:
        model = BigBirdForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model , tf_checkpoint_path , is_trivia_qa=is_trivia_qa )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
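# Example invocation (placeholder paths, assumed script name; the flags are the
# ones registered on the parser above):
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bigbird_tf_ckpt \
#       --big_bird_config_file ./config.json \
#       --pytorch_dump_path ./pytorch_dump \
#       --is_trivia_qa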
| 707
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = IFInpaintingPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _lowerCamelCase ( self : Any , A : int , A : Dict=0) -> Tuple:
"""simple docstring"""
if str(A).startswith('mps'):
_UpperCAmelCase = torch.manual_seed(A)
else:
_UpperCAmelCase = torch.Generator(device=A).manual_seed(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
self._test_save_load_local()
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 639
| 0
|
from math import sqrt
def a_ ( lowerCAmelCase_ : int ):
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
__lowerCAmelCase = True
# 0 and 1 are none primes.
if number <= 1:
__lowerCAmelCase = False
for divisor in range(2, int(round(sqrt(lowerCAmelCase_ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
__lowerCAmelCase = False
break
# precondition
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ), "'status' must been from type bool"
return status
def a_ ( lowerCAmelCase_ : int ):
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__lowerCAmelCase = list(range(2, n + 1 ) )
__lowerCAmelCase = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowerCAmelCase_ ) ):
for j in range(i + 1, len(lowerCAmelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__lowerCAmelCase = 0
# filters actual prime numbers.
__lowerCAmelCase = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def a_ ( lowerCAmelCase_ : int ):
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ) and (n > 2), "'N' must been an int and > 2"
__lowerCAmelCase = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2, n + 1 ):
if is_prime(lowerCAmelCase_ ):
ans.append(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def a_ ( lowerCAmelCase_ : Tuple ):
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ) and number >= 0, "'number' must been an int and >= 0"
__lowerCAmelCase = [] # this list will be returns of the function.
# potential prime number factors.
__lowerCAmelCase = 2
__lowerCAmelCase = number
if number == 0 or number == 1:
ans.append(lowerCAmelCase_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCAmelCase_ ):
while quotient != 1:
if is_prime(lowerCAmelCase_ ) and (quotient % factor == 0):
ans.append(lowerCAmelCase_ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def a_ ( lowerCAmelCase_ : Dict ):
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowerCAmelCase = 0
# prime factorization of 'number'
__lowerCAmelCase = prime_factorization(lowerCAmelCase_ )
__lowerCAmelCase = max(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ), "'ans' must been from type int"
return ans
def a_ ( lowerCAmelCase_ : Tuple ):
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowerCAmelCase = 0
# prime factorization of 'number'
__lowerCAmelCase = prime_factorization(lowerCAmelCase_ )
__lowerCAmelCase = min(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ), "'ans' must been from type int"
return ans
def a_ ( lowerCAmelCase_ : Optional[Any] ):
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ), "'number' must been an int"
    assert isinstance(number % 2 == 0, lowerCAmelCase_ ), "compare must be of type bool"
return number % 2 == 0
def a_ ( lowerCAmelCase_ : str ):
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ), "'number' must been an int"
    assert isinstance(number % 2 != 0, lowerCAmelCase_ ), "compare must be of type bool"
return number % 2 != 0
def a_ ( lowerCAmelCase_ : Any ):
assert (
isinstance(lowerCAmelCase_, lowerCAmelCase_ ) and (number > 2) and is_even(lowerCAmelCase_ )
), "'number' must been an int, even and > 2"
__lowerCAmelCase = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__lowerCAmelCase = get_prime_numbers(lowerCAmelCase_ )
__lowerCAmelCase = len(lowerCAmelCase_ )
# run variable for while-loops.
__lowerCAmelCase = 0
__lowerCAmelCase = None
# exit variable. for break up the loops
__lowerCAmelCase = True
while i < len_pn and loop:
__lowerCAmelCase = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__lowerCAmelCase = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCAmelCase_, lowerCAmelCase_ )
and (len(lowerCAmelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def a_ ( lowerCAmelCase_ : List[Any], lowerCAmelCase_ : int ):
assert (
isinstance(lowerCAmelCase_, lowerCAmelCase_ )
and isinstance(lowerCAmelCase_, lowerCAmelCase_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__lowerCAmelCase = 0
while numbera != 0:
__lowerCAmelCase = numbera % numbera
__lowerCAmelCase = numbera
__lowerCAmelCase = rest
# precondition
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : List[Any] ):
assert (
isinstance(lowerCAmelCase_, lowerCAmelCase_ )
and isinstance(lowerCAmelCase_, lowerCAmelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__lowerCAmelCase = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__lowerCAmelCase = prime_factorization(lowerCAmelCase_ )
__lowerCAmelCase = prime_factorization(lowerCAmelCase_ )
elif numbera == 1 or numbera == 1:
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = max(lowerCAmelCase_, lowerCAmelCase_ )
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__lowerCAmelCase = prime_fac_a.count(lowerCAmelCase_ )
__lowerCAmelCase = prime_fac_a.count(lowerCAmelCase_ )
for _ in range(max(lowerCAmelCase_, lowerCAmelCase_ ) ):
ans *= n
else:
__lowerCAmelCase = prime_fac_a.count(lowerCAmelCase_ )
for _ in range(lowerCAmelCase_ ):
ans *= n
done.append(lowerCAmelCase_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__lowerCAmelCase = prime_fac_a.count(lowerCAmelCase_ )
for _ in range(lowerCAmelCase_ ):
ans *= n
done.append(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def a_ ( lowerCAmelCase_ : str ):
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ) and (n >= 0), "'number' must been a positive int"
__lowerCAmelCase = 0
__lowerCAmelCase = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCAmelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ) and is_prime(
lowerCAmelCase_ ), "'ans' must been a prime number and from type int"
return ans
def a_ ( lowerCAmelCase_ : Dict, lowerCAmelCase_ : str ):
assert (
is_prime(lowerCAmelCase_ ) and is_prime(lowerCAmelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__lowerCAmelCase = p_number_a + 1 # jump to the next number
__lowerCAmelCase = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCAmelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCAmelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCAmelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCAmelCase_, lowerCAmelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCAmelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def a_ ( lowerCAmelCase_ : Optional[Any] ):
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ) and (n >= 1), "'n' must been int and >= 1"
__lowerCAmelCase = [] # will be returned.
for divisor in range(1, n + 1 ):
if n % divisor == 0:
ans.append(lowerCAmelCase_ )
# precondition
assert ans[0] == 1 and ans[len(lowerCAmelCase_ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def a_ ( lowerCAmelCase_ : int ):
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
__lowerCAmelCase = get_divisors(lowerCAmelCase_ )
# precondition
assert (
isinstance(lowerCAmelCase_, lowerCAmelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCAmelCase_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : Any ):
assert (
isinstance(lowerCAmelCase_, lowerCAmelCase_ )
and isinstance(lowerCAmelCase_, lowerCAmelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__lowerCAmelCase = gcd(abs(lowerCAmelCase_ ), abs(lowerCAmelCase_ ) )
# precondition
assert (
isinstance(lowerCAmelCase_, lowerCAmelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def a_ ( lowerCAmelCase_ : Dict ):
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ) and (n >= 0), "'n' must been a int and >= 0"
__lowerCAmelCase = 1 # this will be return.
for factor in range(1, n + 1 ):
ans *= factor
return ans
def a_ ( lowerCAmelCase_ : Dict ):
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ) and (n >= 0), "'n' must been an int and >= 0"
__lowerCAmelCase = 0
__lowerCAmelCase = 1
__lowerCAmelCase = 1 # this will be return
for _ in range(n - 1 ):
__lowerCAmelCase = ans
ans += fiba
__lowerCAmelCase = tmp
return ans
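# A few hedged sanity checks for the helpers above, assuming they carry the
# intended names used throughout this module (is_prime, prime_factorization, gcd):
def _number_theory_examples() -> None:
    assert is_prime(97) and not is_prime(1)
    assert prime_factorization(60) == [2, 2, 3, 5]
    assert gcd(12, 18) == 6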
| 53
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Union[str, Any]="resnet50" , lowerCAmelCase_ : str=3 , lowerCAmelCase_ : List[str]=3_2 , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Optional[Any]=True , ) -> List[Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = out_indices if out_indices is not None else [4]
__lowerCAmelCase = stage_names
__lowerCAmelCase = out_features
__lowerCAmelCase = backbone
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = use_pretrained_backbone
__lowerCAmelCase = is_training
def lowercase ( self : List[str] ) -> Any:
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = self.get_config()
return config, pixel_values
def lowercase ( self : List[Any] ) -> Union[str, Any]:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowercase ( self : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple ) -> int:
__lowerCAmelCase = TimmBackbone(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )
def lowercase ( self : List[str] ) -> str:
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (TimmBackbone,) if is_torch_available() else ()
a_ = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
a_ = False
a_ = False
a_ = False
a_ = False
def lowercase ( self : Tuple ) -> int:
__lowerCAmelCase = TimmBackboneModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def lowercase ( self : Dict ) -> List[str]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self : Union[str, Any] ) -> Optional[int]:
__lowerCAmelCase = 'resnet18'
__lowerCAmelCase = 'microsoft/resnet-18'
__lowerCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase_ , use_timm_backbone=lowerCAmelCase_ )
__lowerCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase_ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__lowerCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase_ , use_timm_backbone=lowerCAmelCase_ , out_indices=[1, 2, 3] )
__lowerCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase_ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def lowercase ( self : List[str] ) -> Tuple:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def lowercase ( self : Dict ) -> int:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def lowercase ( self : str ) -> str:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def lowercase ( self : Any ) -> str:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def lowercase ( self : Optional[int] ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def lowercase ( self : Dict ) -> Any:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def lowercase ( self : Any ) -> Optional[int]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def lowercase ( self : Union[str, Any] ) -> Tuple:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def lowercase ( self : List[str] ) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def lowercase ( self : Dict ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def lowercase ( self : Tuple ) -> List[str]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def lowercase ( self : int ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def lowercase ( self : Union[str, Any] ) -> str:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def lowercase ( self : Dict ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase ( self : List[str] ) -> Optional[Any]:
pass
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(lowerCAmelCase_ )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowercase ( self : int ) -> Union[str, Any]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
__lowerCAmelCase = self.has_attentions
# no need to test all models as different heads yield the same functionality
__lowerCAmelCase = self.all_model_classes[0]
__lowerCAmelCase = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
__lowerCAmelCase = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = model(**lowerCAmelCase_ )
__lowerCAmelCase = outputs[0][-1]
# Encoder-/Decoder-only models
__lowerCAmelCase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__lowerCAmelCase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowerCAmelCase_ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowercase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(**lowerCAmelCase_ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__lowerCAmelCase = copy.deepcopy(lowerCAmelCase_ )
__lowerCAmelCase = None
__lowerCAmelCase = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(**lowerCAmelCase_ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__lowerCAmelCase = copy.deepcopy(lowerCAmelCase_ )
__lowerCAmelCase = False
__lowerCAmelCase = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(**lowerCAmelCase_ )
| 53
| 1
|
from math import ceil
def assert_device_map( device_map , num_blocks ):
    blocks = list(range(0 , num_blocks ) )
    device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i ) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i )
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks ) != 0:
        raise ValueError(
            '''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'''
            ''' These attention blocks were specified more than once: ''' + str(duplicate_blocks ) )
    if len(missing_blocks ) != 0:
        raise ValueError(
            '''There are attention blocks for this model that are not specified in the device_map. Add these attention '''
            '''blocks to a device on the device_map: ''' + str(missing_blocks ) )
    if len(extra_blocks ) != 0:
        raise ValueError(
            '''The device_map contains more attention blocks than this model has. Remove these from the device_map:'''
            + str(extra_blocks ) )
def get_device_map( n_layers , devices ):
    layers = list(range(n_layers ) )
    n_blocks = int(ceil(n_layers / len(devices ) ) )
    layers_list = [layers[i : i + n_blocks] for i in range(0 , n_layers , n_blocks )]
    return dict(zip(devices , layers_list ) )
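# Example sketch (assumes the helpers above are named assert_device_map and
# get_device_map, as in transformers' model-parallel utilities): splitting twelve
# layers across four hypothetical devices yields blocks of three layers each.
def _device_map_example() -> None:
    device_map = get_device_map(12, [0, 1, 2, 3])
    assert device_map == {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}
    assert_device_map(device_map, 12)  # a complete, non-overlapping map raises nothing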
| 472
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = KandinskyImgaImgPipeline
a = ["prompt", "image_embeds", "negative_image_embeds", "image"]
a = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
a = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
a = False
@property
def lowercase_ ( self : str ) -> List[str]:
return 32
@property
def lowercase_ ( self : Optional[int] ) -> int:
return 32
@property
def lowercase_ ( self : Union[str, Any] ) -> int:
return self.time_input_dim
@property
def lowercase_ ( self : List[str] ) -> int:
return self.time_input_dim * 4
@property
def lowercase_ ( self : Union[str, Any] ) -> Any:
return 100
@property
def lowercase_ ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def lowercase_ ( self : List[Any] ) -> List[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
SCREAMING_SNAKE_CASE__ = MultilingualCLIP(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = text_encoder.eval()
return text_encoder
@property
def lowercase_ ( self : str ) -> Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(**__lowerCamelCase )
return model
@property
def lowercase_ ( self : Dict ) -> Optional[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase_ ( self : Tuple ) -> Optional[int]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase_ ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ = self.dummy_tokenizer
SCREAMING_SNAKE_CASE__ = self.dummy_unet
SCREAMING_SNAKE_CASE__ = self.dummy_movq
SCREAMING_SNAKE_CASE__ = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE__ = DDIMScheduler(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowercase_ ( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any]=0 ) -> str:
SCREAMING_SNAKE_CASE__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
if str(__lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def lowercase_ ( self : int ) -> Any:
SCREAMING_SNAKE_CASE__ = '''cpu'''
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = pipe(
**self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Tuple ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE__ = '''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE__ = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ = pipeline.to(__lowerCamelCase )
pipeline.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = pipe_prior(
__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE__ = pipeline(
__lowerCamelCase , image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
| 472
| 1
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
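# Illustrative usage (hedged sketch, not part of the builder itself): the packaged
# "pandas" loader reads pickled DataFrames, one table per file. The demo below writes a
# tiny pickle to a temporary directory and loads it back through `datasets.load_dataset`;
# the file name and column names are made up for the example.
if __name__ == "__main__":
    import os
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        demo_path = os.path.join(tmp_dir, "train.pkl")
        pd.DataFrame({"text": ["a", "b"], "label": [0, 1]}).to_pickle(demo_path)
        demo_dset = datasets.load_dataset("pandas", data_files={"train": demo_path})["train"]
        print(demo_dset[0])  # expected: {'text': 'a', 'label': 0}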
"""simple docstring"""
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Power Iteration: find the largest eigenvalue and corresponding eigenvector
    of `input_matrix`, starting from a random vector in the same space."""
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
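# Small usage sketch (illustrative only; the matrix and the rounded eigenvalue below are
# my own example, not part of the original module): power_iteration returns the dominant
# eigenvalue and a unit-norm eigenvector for a symmetric (or Hermitian) matrix.
if __name__ == "__main__":
    demo_matrix = np.array([[2.0, 1.0], [1.0, 3.0]])
    demo_value, demo_vector = power_iteration(demo_matrix, np.array([1.0, 1.0]))
    # The exact dominant eigenvalue is (5 + 5 ** 0.5) / 2, roughly 3.618.
    print(f"dominant eigenvalue ~= {demo_value:.4f}, eigenvector = {demo_vector}")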
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
):
    """Return the shortest distance and path on a binary grid (cells equal to 1 are
    walkable, every step costs 1) using Dijkstra's algorithm."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None
    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
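# Usage sketch (an illustrative example of the function above): crossing an all-ones
# 3x3 grid from (0, 0) to (2, 2) without diagonal moves takes 4 unit-cost steps.
if __name__ == "__main__":
    demo_grid = np.ones((3, 3), dtype=int)
    demo_distance, demo_path = dijkstra(demo_grid, (0, 0), (2, 2), allow_diagonal=False)
    print(demo_distance, demo_path)  # 4.0 and a path from (0, 0) to (2, 2)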
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
_snake_case = getLogger(__name__)
_snake_case = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16: bool = False,
    task: str = "summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids, attention_mask=batch.attention_mask, **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples")
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all.")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info", nargs="?", type=str, const=datetime_now(), help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCAmelCase__ ( lowerCamelCase_ : BertModel , lowerCamelCase_ : str , lowerCamelCase_ : str ):
__a : str = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
__a : int = (
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
if not os.path.isdir(lowerCamelCase_ ):
os.makedirs(lowerCamelCase_ )
__a : Union[str, Any] = model.state_dict()
def to_tf_var_name(lowerCamelCase_ : str ):
for patt, repl in iter(lowerCamelCase_ ):
__a : Union[str, Any] = name.replace(lowerCamelCase_ , lowerCamelCase_ )
return f'''bert/{name}'''
def create_tf_var(lowerCamelCase_ : np.ndarray , lowerCamelCase_ : str , lowerCamelCase_ : tf.Session ):
__a : List[Any] = tf.dtypes.as_dtype(tensor.dtype )
__a : List[str] = tf.get_variable(dtype=lowerCamelCase_ , shape=tensor.shape , name=lowerCamelCase_ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(lowerCamelCase_ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
__a : Optional[Any] = to_tf_var_name(lowerCamelCase_ )
__a : Optional[Any] = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
__a : Union[str, Any] = torch_tensor.T
__a : str = create_tf_var(tensor=lowerCamelCase_ , name=lowerCamelCase_ , session=lowerCamelCase_ )
tf.keras.backend.set_value(lowerCamelCase_ , lowerCamelCase_ )
__a : Union[str, Any] = session.run(lowerCamelCase_ )
print(f'''Successfully created {tf_name}: {np.allclose(lowerCamelCase_ , lowerCamelCase_ )}''' )
__a : Any = tf.train.Saver(tf.trainable_variables() )
saver.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , model_name.replace('-' , '_' ) + '.ckpt' ) )
def UpperCAmelCase__ ( lowerCamelCase_ : Union[str, Any]=None ):
__a : str = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='model name e.g. bert-base-uncased' )
parser.add_argument(
'--cache_dir' , type=lowerCamelCase_ , default=lowerCamelCase_ , required=lowerCamelCase_ , help='Directory containing pytorch model' )
parser.add_argument('--pytorch_model_path' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='/path/to/<pytorch-model-name>.bin' )
parser.add_argument('--tf_cache_dir' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='Directory in which to save tensorflow model' )
__a : List[Any] = parser.parse_args(lowerCamelCase_ )
__a : Optional[Any] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=lowerCamelCase_ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = '''informer'''
__SCREAMING_SNAKE_CASE : List[Any] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "student_t" , SCREAMING_SNAKE_CASE__ : str = "nll" , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : List[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : int = 6_4 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : str = "gelu" , SCREAMING_SNAKE_CASE__ : float = 0.05 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : int = 1_0_0 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : str = "prob" , SCREAMING_SNAKE_CASE__ : int = 5 , SCREAMING_SNAKE_CASE__ : bool = True , **SCREAMING_SNAKE_CASE__ : Tuple , ):
'''simple docstring'''
__a : Dict = prediction_length
__a : Tuple = context_length or prediction_length
__a : Tuple = distribution_output
__a : Tuple = loss
__a : str = input_size
__a : Dict = num_time_features
__a : Optional[int] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
__a : str = scaling
__a : Tuple = num_dynamic_real_features
__a : int = num_static_real_features
__a : Dict = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
__a : Optional[Any] = cardinality
else:
__a : Optional[int] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
__a : int = embedding_dimension
else:
__a : List[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
__a : int = num_parallel_samples
# Transformer architecture configuration
__a : str = input_size * len(self.lags_sequence ) + self._number_of_features
__a : Optional[int] = d_model
__a : Union[str, Any] = encoder_attention_heads
__a : int = decoder_attention_heads
__a : Any = encoder_ffn_dim
__a : Union[str, Any] = decoder_ffn_dim
__a : List[Any] = encoder_layers
__a : Optional[int] = decoder_layers
__a : int = dropout
__a : Optional[Any] = attention_dropout
__a : Dict = activation_dropout
__a : Union[str, Any] = encoder_layerdrop
__a : Optional[int] = decoder_layerdrop
__a : List[str] = activation_function
__a : str = init_std
__a : Optional[int] = use_cache
# Informer
__a : Union[str, Any] = attention_type
__a : str = sampling_factor
__a : Dict = distil
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
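# Illustrative usage (hedged): this class mirrors the upstream `transformers.InformerConfig`.
# The sketch below goes through the public transformers classes rather than the locally
# renamed one, and the hyper-parameter values are arbitrary examples:
#
#     from transformers import InformerConfig, InformerModel
#
#     config = InformerConfig(prediction_length=24, context_length=48, input_size=1,
#                             num_time_features=2, lags_sequence=[1, 2, 3])
#     model = InformerModel(config)  # randomly initialised, ready for time-series training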
'''simple docstring'''
import random
def _partition(data: list, pivot) -> tuple:
    """Three-way partition of the data into lists smaller than, equal to and greater
    than the pivot."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
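# Usage sketch (illustrative): quick_select returns the element that would sit at
# position `index` if the list were sorted, in expected linear time.
if __name__ == "__main__":
    demo_items = [2, 4, 5, 7, 899, 54, 32]
    print(quick_select(demo_items, 3))  # 7  -> the median (sorted: [2, 4, 5, 7, 32, 54, 899])
    print(quick_select(demo_items, 0))  # 2  -> the minimum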
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def A_ ( self : Any ) -> Tuple:
'''simple docstring'''
__snake_case : List[str] = SMALL_MODEL_IDENTIFIER
__snake_case : str = 'pt'
__snake_case : int = 'tf'
def A_ ( self : str , __a : Any ) -> Tuple:
'''simple docstring'''
__snake_case : str = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(__a )
def A_ ( self : List[Any] , __a : List[Any] ) -> str:
'''simple docstring'''
__snake_case : Any = TFAutoModel.from_pretrained(self.test_model , from_pt=__a )
model_tf.save_pretrained(__a )
def A_ ( self : Any ) -> Any:
'''simple docstring'''
__snake_case : Any = 'mock_framework'
# Framework provided - return whatever the user provides
__snake_case : int = FeaturesManager.determine_framework(self.test_model , __a )
self.assertEqual(__a , __a )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a )
__snake_case : Dict = FeaturesManager.determine_framework(__a , __a )
self.assertEqual(__a , __a )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a )
__snake_case : List[Any] = FeaturesManager.determine_framework(__a , __a )
self.assertEqual(__a , __a )
def A_ ( self : int ) -> List[str]:
'''simple docstring'''
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a )
__snake_case : Any = FeaturesManager.determine_framework(__a )
self.assertEqual(__a , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a )
__snake_case : List[str] = FeaturesManager.determine_framework(__a )
self.assertEqual(__a , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__a ):
__snake_case : List[str] = FeaturesManager.determine_framework(__a )
def A_ ( self : List[str] ) -> Any:
'''simple docstring'''
__snake_case : Union[str, Any] = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_tf_available' , __a ):
__snake_case : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
__snake_case : int = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_torch_available' , __a ):
__snake_case : List[str] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_tf )
# Both in environment -> use PyTorch
__snake_case : Tuple = MagicMock(return_value=__a )
__snake_case : Optional[Any] = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_tf_available' , __a ), patch(
'transformers.onnx.features.is_torch_available' , __a ):
__snake_case : Optional[int] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_pt )
# Both not in environment -> raise error
__snake_case : int = MagicMock(return_value=__a )
__snake_case : Optional[Any] = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_tf_available' , __a ), patch(
'transformers.onnx.features.is_torch_available' , __a ):
with self.assertRaises(__a ):
__snake_case : Any = FeaturesManager.determine_framework(self.test_model )
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : Any = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase : Any = {
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
lowerCamelCase : Optional[int] = {'mobilebert-uncased': 512}
lowerCamelCase : Dict = {}
class __lowerCAmelCase (A__ ):
'''simple docstring'''
lowerCAmelCase__ : str = VOCAB_FILES_NAMES
lowerCAmelCase__ : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ : List[Any] = MobileBertTokenizer
def __init__(self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Dict=True , UpperCamelCase : Tuple="[UNK]" , UpperCamelCase : List[str]="[SEP]" , UpperCamelCase : List[str]="[PAD]" , UpperCamelCase : Any="[CLS]" , UpperCamelCase : List[Any]="[MASK]" , UpperCamelCase : str=True , UpperCamelCase : Tuple=None , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , snake_case_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , snake_case_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , snake_case_ ) != tokenize_chinese_chars
):
lowercase__ = getattr(snake_case_ , normalizer_state.pop('''type''' ) )
lowercase__ = do_lower_case
lowercase__ = strip_accents
lowercase__ = tokenize_chinese_chars
lowercase__ = normalizer_class(**snake_case_ )
lowercase__ = do_lower_case
def UpperCamelCase__ (self : Any , UpperCamelCase : Tuple , UpperCamelCase : Dict=None ):
'''simple docstring'''
lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ (self : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : int = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : int , UpperCamelCase : List[str] = None ):
'''simple docstring'''
lowercase__ = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def lowercase (_lowerCAmelCase ):
if is_torch_version("""<""" , """2.0.0""" ) or not hasattr(_lowerCAmelCase , """_dynamo""" ):
return False
return isinstance(_lowerCAmelCase , torch._dynamo.eval_frame.OptimizedModule )
def lowercase (_lowerCAmelCase , _lowerCAmelCase = True ):
__lowerCAmelCase = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__lowerCAmelCase = is_compiled_module(_lowerCAmelCase )
if is_compiled:
__lowerCAmelCase = model
__lowerCAmelCase = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = model.module
if not keep_fpaa_wrapper:
__lowerCAmelCase = getattr(_lowerCAmelCase , """forward""" )
__lowerCAmelCase = model.__dict__.pop("""_original_forward""" , _lowerCAmelCase )
if original_forward is not None:
while hasattr(_lowerCAmelCase , """__wrapped__""" ):
__lowerCAmelCase = forward.__wrapped__
if forward == original_forward:
break
__lowerCAmelCase = forward
if getattr(_lowerCAmelCase , """_converted_to_transformer_engine""" , _lowerCAmelCase ):
convert_model(_lowerCAmelCase , to_transformer_engine=_lowerCAmelCase )
if is_compiled:
__lowerCAmelCase = model
__lowerCAmelCase = compiled_model
return model
def lowercase ():
PartialState().wait_for_everyone()
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_lowerCAmelCase , _lowerCAmelCase )
elif PartialState().local_process_index == 0:
torch.save(_lowerCAmelCase , _lowerCAmelCase )
@contextmanager
def lowercase (**_lowerCAmelCase ):
for key, value in kwargs.items():
__lowerCAmelCase = str(_lowerCAmelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def lowercase (_lowerCAmelCase ):
if not hasattr(_lowerCAmelCase , """__qualname__""" ) and not hasattr(_lowerCAmelCase , """__name__""" ):
__lowerCAmelCase = getattr(_lowerCAmelCase , """__class__""" , _lowerCAmelCase )
if hasattr(_lowerCAmelCase , """__qualname__""" ):
return obj.__qualname__
if hasattr(_lowerCAmelCase , """__name__""" ):
return obj.__name__
return str(_lowerCAmelCase )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
for key, value in source.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = destination.setdefault(_lowerCAmelCase , {} )
merge_dicts(_lowerCAmelCase , _lowerCAmelCase )
else:
__lowerCAmelCase = value
return destination
def lowercase (_lowerCAmelCase = None ):
if port is None:
__lowerCAmelCase = 2_9500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(("""localhost""", port) ) == 0
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ : Optional[int] = logging.get_logger(__name__)
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = UniSpeechSatForSequenceClassification.from_pretrained(_UpperCamelCase , config=_UpperCamelCase )
SCREAMING_SNAKE_CASE = downstream_dict['projector.weight']
SCREAMING_SNAKE_CASE = downstream_dict['projector.bias']
SCREAMING_SNAKE_CASE = downstream_dict['model.post_net.linear.weight']
SCREAMING_SNAKE_CASE = downstream_dict['model.post_net.linear.bias']
return model
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = UniSpeechSatForAudioFrameClassification.from_pretrained(_UpperCamelCase , config=_UpperCamelCase )
SCREAMING_SNAKE_CASE = downstream_dict['model.linear.weight']
SCREAMING_SNAKE_CASE = downstream_dict['model.linear.bias']
return model
def __lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = UniSpeechSatForXVector.from_pretrained(_UpperCamelCase , config=_UpperCamelCase )
SCREAMING_SNAKE_CASE = downstream_dict['connector.weight']
SCREAMING_SNAKE_CASE = downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
SCREAMING_SNAKE_CASE = downstream_dict[
f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
SCREAMING_SNAKE_CASE = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
SCREAMING_SNAKE_CASE = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
SCREAMING_SNAKE_CASE = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
SCREAMING_SNAKE_CASE = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
SCREAMING_SNAKE_CASE = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
SCREAMING_SNAKE_CASE = downstream_dict['objective.W']
return model
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : str , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = torch.load(_UpperCamelCase , map_location='cpu' )
SCREAMING_SNAKE_CASE = checkpoint['Downstream']
SCREAMING_SNAKE_CASE = UniSpeechSatConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(
_UpperCamelCase , return_attention_mask=_UpperCamelCase , do_normalize=_UpperCamelCase )
SCREAMING_SNAKE_CASE = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
SCREAMING_SNAKE_CASE = convert_classification(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
elif arch.endswith('ForAudioFrameClassification' ):
SCREAMING_SNAKE_CASE = convert_diarization(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
elif arch.endswith('ForXVector' ):
SCREAMING_SNAKE_CASE = convert_xvector(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
SCREAMING_SNAKE_CASE = checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(_UpperCamelCase )
hf_model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
a_ : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
a_ : str = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
def sum_of_digits(n: int) -> int:
    """Find the sum of digits of a number (iterative)."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Find the sum of digits of a number (recursive)."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Find the sum of digits of a number using str() and int()."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three functions, with three different length int values."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
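# Small usage sketch (illustrative): all three variants agree and ignore the sign of
# the input, e.g. the digits of 12345 sum to 15.
if __name__ == "__main__":
    for demo_fn in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
        assert demo_fn(12345) == 15
        assert demo_fn(-12345) == 15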
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Minimum total cost of 1-, 7- and 30-day passes covering all travel days."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
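# Usage sketch (illustrative, classic example): with travel days [1, 4, 6, 7, 8, 20] and
# pass costs [2, 7, 15] for 1-, 7- and 30-day passes, the cheapest plan is a 1-day pass
# on day 1, a 7-day pass covering days 4-10 and a 1-day pass on day 20: 2 + 7 + 2 = 11.
if __name__ == "__main__":
    assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11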
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = '''▁'''
UpperCamelCase__ = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
UpperCamelCase__ = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
UpperCamelCase__ = {
'''facebook/s2t-small-librispeech-asr''': 1024,
}
UpperCamelCase__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
UpperCamelCase__ = {'''mustc''': MUSTC_LANGS}
class __snake_case ( snake_case__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = MAX_MODEL_INPUT_SIZES
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = []
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase="<s>" , _UpperCamelCase="</s>" , _UpperCamelCase="<pad>" , _UpperCamelCase="<unk>" , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase = None , **_UpperCamelCase , ) -> None:
"""simple docstring"""
__snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , do_upper_case=_UpperCamelCase , do_lower_case=_UpperCamelCase , tgt_lang=_UpperCamelCase , lang_codes=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
__snake_case = do_upper_case
__snake_case = do_lower_case
__snake_case = load_json(_UpperCamelCase )
__snake_case = {v: k for k, v in self.encoder.items()}
__snake_case = spm_file
__snake_case = load_spm(_UpperCamelCase , self.sp_model_kwargs )
if lang_codes is not None:
__snake_case = lang_codes
__snake_case = LANGUAGES[lang_codes]
__snake_case = [F'<lang:{lang}>' for lang in self.langs]
__snake_case = {lang: self.sp_model.PieceToId(F'<lang:{lang}>' ) for lang in self.langs}
__snake_case = self.lang_tokens
__snake_case = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__snake_case = {}
@property
def a ( self ) -> int:
"""simple docstring"""
return len(self.encoder )
@property
def a ( self ) -> str:
"""simple docstring"""
return self._tgt_lang
@tgt_lang.setter
def a ( self , _UpperCamelCase ) -> None:
"""simple docstring"""
__snake_case = new_tgt_lang
self.set_tgt_lang_special_tokens(_UpperCamelCase )
def a ( self , _UpperCamelCase ) -> None:
"""simple docstring"""
__snake_case = self.lang_code_to_id[tgt_lang]
__snake_case = [lang_code_id]
def a ( self , _UpperCamelCase ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
def a ( self , _UpperCamelCase ) -> Optional[Any]:
"""simple docstring"""
return self.encoder.get(_UpperCamelCase , self.encoder[self.unk_token] )
def a ( self , _UpperCamelCase ) -> str:
"""simple docstring"""
return self.decoder.get(_UpperCamelCase , self.unk_token )
def a ( self , _UpperCamelCase ) -> str:
"""simple docstring"""
__snake_case = []
__snake_case = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__snake_case = self.sp_model.decode(_UpperCamelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__snake_case = []
else:
current_sub_tokens.append(_UpperCamelCase )
__snake_case = self.sp_model.decode(_UpperCamelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def a ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def a ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
__snake_case = [1] * len(self.prefix_tokens )
__snake_case = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCamelCase )) + ([0] * len(_UpperCamelCase )) + suffix_ones
def a ( self ) -> Dict:
"""simple docstring"""
__snake_case = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
"""simple docstring"""
__snake_case = self.__dict__.copy()
__snake_case = None
return state
def __setstate__( self , _UpperCamelCase ) -> None:
"""simple docstring"""
__snake_case = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__snake_case = {}
__snake_case = load_spm(self.spm_file , self.sp_model_kwargs )
def a ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
__snake_case = Path(_UpperCamelCase )
assert save_dir.is_dir(), F'{save_directory} should be a directory'
__snake_case = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
__snake_case = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , _UpperCamelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _UpperCamelCase )
elif not os.path.isfile(self.spm_file ):
with open(_UpperCamelCase , """wb""" ) as fi:
__snake_case = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (str(_UpperCamelCase ), str(_UpperCamelCase ))
def lowerCamelCase__ ( __A :str ,__A :Dict[str, Any] ):
"""simple docstring"""
__snake_case = sentencepiece.SentencePieceProcessor(**__A )
spm.Load(str(__A ) )
return spm
def lowerCamelCase__ ( __A :str ):
"""simple docstring"""
with open(__A ,"""r""" ) as f:
return json.load(__A )
def lowerCamelCase__ ( __A :Optional[Any] ,__A :str ):
"""simple docstring"""
with open(__A ,"""w""" ) as f:
json.dump(__A ,__A ,indent=2 )
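# Illustrative usage (hedged): this module mirrors the upstream Speech2Text tokenizer.
# The sketch goes through the public `transformers` class rather than the locally renamed
# one, and needs the sentencepiece extra plus network access for the checkpoint:
#
#     from transformers import Speech2TextTokenizer
#
#     tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#     ids = tok("hello world").input_ids          # sentencepiece pieces -> vocabulary ids
#     print(tok.decode(ids, skip_special_tokens=True))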
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowerCamelCase : List[str] = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = {'''vocab_file''': '''spiece.model'''}
lowerCamelCase : Union[str, Any] = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : int , __a : Tuple , __a : List[str]=False , __a : Dict=True , __a : Optional[Any]=False , __a : Dict="<s>" , __a : Any="</s>" , __a : int="<unk>" , __a : int="<sep>" , __a : Any="<pad>" , __a : List[Any]="<cls>" , __a : str="<mask>" , __a : Union[str, Any]=["<eop>", "<eod>"] , __a : Optional[Dict[str, Any]] = None , **__a : str , ) -> None:
"""simple docstring"""
__lowercase : List[str] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
__lowercase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__a , remove_space=__a , keep_accents=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , additional_special_tokens=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
__lowercase : Dict = 3
__lowercase : List[Any] = do_lower_case
__lowercase : Tuple = remove_space
__lowercase : Optional[Any] = keep_accents
__lowercase : str = vocab_file
__lowercase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__a )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
__lowercase : int = jieba
__lowercase : Tuple = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
return len(self.sp_model )
def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase : List[str] = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = self.__dict__.copy()
__lowercase : Tuple = None
return state
def __setstate__( self : Any , __a : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__lowercase : Optional[Any] = {}
__lowercase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase ( self : Dict , __a : List[Any] ) -> List[Any]:
"""simple docstring"""
if self.remove_space:
__lowercase : Optional[int] = """ """.join(inputs.strip().split() )
else:
__lowercase : List[Any] = inputs
__lowercase : Dict = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
__lowercase : int = unicodedata.normalize("""NFKD""" , __a )
__lowercase : Optional[Any] = """""".join([c for c in outputs if not unicodedata.combining(__a )] )
if self.do_lower_case:
__lowercase : Optional[int] = outputs.lower()
return outputs
def lowerCAmelCase ( self : Any , __a : str ) -> List[str]:
"""simple docstring"""
__lowercase : Optional[Any] = self.preprocess_text(__a )
__lowercase : List[str] = self.sp_model.encode(__a , out_type=__a )
__lowercase : str = []
for piece in pieces:
if len(__a ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
__lowercase : str = self.sp_model.EncodeAsPieces(piece[:-1].replace(__a , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__lowercase : List[Any] = cur_pieces[1:]
else:
__lowercase : Tuple = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__a )
else:
new_pieces.append(__a )
return new_pieces
def lowerCAmelCase ( self : str , __a : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return self.sp_model.PieceToId(__a )
def lowerCAmelCase ( self : List[str] , __a : List[str] ) -> List[str]:
"""simple docstring"""
return self.sp_model.IdToPiece(__a )
def lowerCAmelCase ( self : Optional[int] , __a : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase : Dict = """""".join(__a ).replace(__a , """ """ ).strip()
return out_string
def lowerCAmelCase ( self : Tuple , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase : List[str] = [self.sep_token_id]
__lowercase : Any = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowerCAmelCase ( self : Any , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is not None:
return ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1, 1]
return ([0] * len(__a )) + [1, 1]
def lowerCAmelCase ( self : Dict , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase : int = [self.sep_token_id]
__lowercase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowerCAmelCase ( self : Optional[Any] , __a : str , __a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__a ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__lowercase : Optional[int] = os.path.join(
__a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , """wb""" ) as fi:
__lowercase : str = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
def lowerCAmelCase ( self : str , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = super()._decode(*__a , **__a )
__lowercase : Any = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
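# Illustrative usage (hedged): the CPM tokenizer above pre-segments Chinese text with
# jieba before running sentencepiece. The sketch uses the public `transformers` class
# and needs `jieba`, `sentencepiece` and network access for the TsinghuaAI checkpoint:
#
#     from transformers import CpmTokenizer
#
#     tok = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#     ids = tok("你好,世界").input_ids
#     print(tok.decode(ids))   # _decode() above maps the jieba placeholder characters back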
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowerCamelCase : Union[str, Any] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , __a : List[str] , __a : Optional[int]=16 , __a : Optional[Any]=13 , __a : str=7 , __a : List[str]=14 , __a : Any=10 , __a : str=19 , __a : int=5 , __a : Any=4 , __a : List[Any]=True , __a : Tuple=16 , __a : Dict=2 , __a : Tuple=4 , __a : int=4 , __a : List[Any]="gelu" , __a : Tuple=0.1 , __a : List[str]=0.1 , __a : int=[1, 2, 3, 4, 5] , __a : str=25 , __a : Any=5 , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = d_model
__lowercase : Dict = parent
__lowercase : Tuple = batch_size
__lowercase : Optional[int] = prediction_length
__lowercase : List[str] = context_length
__lowercase : Any = cardinality
__lowercase : str = num_time_features
__lowercase : Optional[int] = lags_sequence
__lowercase : Optional[Any] = embedding_dimension
__lowercase : List[Any] = is_training
__lowercase : List[str] = hidden_size
__lowercase : int = num_hidden_layers
__lowercase : Any = num_attention_heads
__lowercase : List[Any] = intermediate_size
__lowercase : int = hidden_act
__lowercase : str = hidden_dropout_prob
__lowercase : List[Any] = attention_probs_dropout_prob
__lowercase : str = context_length
__lowercase : int = prediction_length + label_length
__lowercase : Union[str, Any] = label_length
__lowercase : Optional[int] = moving_average
__lowercase : Optional[Any] = autocorrelation_factor
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowerCAmelCase ( self : Tuple , __a : str ) -> int:
"""simple docstring"""
__lowercase : Any = config.context_length + max(config.lags_sequence )
__lowercase : Any = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__lowercase : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__lowercase : List[str] = floats_tensor([self.batch_size, _past_length] )
__lowercase : List[str] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__lowercase : Dict = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__lowercase : str = floats_tensor([self.batch_size, config.prediction_length] )
__lowercase : List[str] = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_config()
__lowercase : Any = self.prepare_autoformer_inputs_dict(__a )
return config, inputs_dict
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase ( self : Optional[Any] , __a : Tuple , __a : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase : List[str] = AutoformerModel(config=__a ).to(__a ).eval()
__lowercase : Optional[int] = model(**__a )
__lowercase : Dict = outputs.encoder_last_hidden_state
__lowercase : Tuple = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : List[str] = model.get_encoder()
encoder.save_pretrained(__a )
__lowercase : List[str] = AutoformerEncoder.from_pretrained(__a ).to(__a )
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase : Any = model.create_network_inputs(**__a )
__lowercase , __lowercase : Any = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__lowercase : Optional[Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__lowercase : Union[str, Any] = encoder(inputs_embeds=__a )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
__lowercase : str = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__lowercase : Optional[int] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__lowercase : Any = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__lowercase : Dict = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : Optional[Any] = model.get_decoder()
decoder.save_pretrained(__a )
__lowercase : Tuple = AutoformerDecoder.from_pretrained(__a ).to(__a )
__lowercase : str = decoder(
trend=__a , inputs_embeds=__a , encoder_hidden_states=__a , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : List[str] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_A : List[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
_A : Any = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
_A : Dict = False
_A : Tuple = False
_A : Optional[int] = False
_A : Tuple = False
_A : str = False
_A : Union[str, Any] = False
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
__lowercase : List[str] = AutoformerModelTester(self )
__lowercase : Dict = ConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__lowercase : Dict = model_class(__a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
__lowercase , __lowercase : Tuple = model_class.from_pretrained(__a , output_loading_info=__a )
self.assertEqual(info["""missing_keys"""] , [] )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__a )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : Any = inspect.signature(getattr(__a , """forward""" ) )
# The main input is the name of the argument after `self`
__lowercase : Optional[int] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __a )
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Dict = model_class(__a )
__lowercase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : Any = [*signature.parameters.keys()]
__lowercase : int = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(__a )] , __a )
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
__lowercase , __lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : int = True
__lowercase : Tuple = getattr(self.model_tester , """seq_length""" , __a )
__lowercase : Union[str, Any] = getattr(self.model_tester , """decoder_seq_length""" , __a )
__lowercase : List[str] = getattr(self.model_tester , """encoder_seq_length""" , __a )
__lowercase : List[Any] = getattr(self.model_tester , """d_model""" , __a )
__lowercase : Optional[int] = getattr(self.model_tester , """num_attention_heads""" , __a )
__lowercase : Any = d_model // num_attention_heads
for model_class in self.all_model_classes:
__lowercase : Dict = True
__lowercase : List[str] = False
__lowercase : Optional[int] = True
__lowercase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : int = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase : Optional[int] = True
__lowercase : List[str] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Dict = outputs.encoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__lowercase : Tuple = len(__a )
__lowercase : str = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__a , __a )
# decoder attentions
__lowercase : List[Any] = outputs.decoder_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__lowercase : Optional[int] = outputs.cross_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__lowercase : Tuple = True
__lowercase : Union[str, Any] = True
__lowercase : Tuple = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Any = model(**self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + 2 , len(__a ) )
__lowercase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def snake_case_ ( lowerCAmelCase_ : Optional[int]="train-batch.pt" ):
__lowercase : Dict = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=lowerCAmelCase_ , repo_type="""dataset""" )
__lowercase : Optional[int] = torch.load(lowerCAmelCase_ , map_location=lowerCAmelCase_ )
return batch
@require_torch
@slow
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
__lowercase : List[str] = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[Any] = prepare_batch()
with torch.no_grad():
__lowercase : Tuple = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__lowercase : List[str] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : int = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[str] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : Optional[Any] = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__lowercase : List[str] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : Optional[int] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : int = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__lowercase : int = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __a )
__lowercase : Optional[Any] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=__a )
__lowercase : Dict = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __a , rtol=1E-1 ) )
| 649
| 1
|
'''simple docstring'''
def join(separator, separated):
    """Join the given strings with the separator, mirroring str.join."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
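# Illustrative usage sketch (added for clarity; assumes the fixed join() above, not part of the original file):
# assert join("-", ["a", "b", "c"]) == "a-b-c"      # the trailing separator is stripped
# assert join(" ", ["You", "are", "amazing!"]) == "You are amazing!"
# join("-", ["a", 1]) raises Exception("join() accepts only strings to be joined")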
| 436
|
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_SCREAMING_SNAKE_CASE : List[Any] = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """simple docstring"""
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            right_value = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, right_value)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            left_value = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, left_value)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """simple docstring"""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    """simple docstring"""
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
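# Illustrative note (added; assumes the fixed helpers above, not part of the original file):
# each list element gets its own process, and neighbours compare/exchange through Pipes for
# the 10 hard-coded rounds in oe_process, which matches the 10-element demo list in main():
#   odd_even_transposition([3, 1, 2, 4, 10, 9, 8, 7, 6, 5])  ->  [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]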
| 436
| 1
|
UNIT_SYMBOL = {
'''meter''': '''m''',
'''kilometer''': '''km''',
'''megametre''': '''Mm''',
'''gigametre''': '''Gm''',
'''terametre''': '''Tm''',
'''petametre''': '''Pm''',
'''exametre''': '''Em''',
'''zettametre''': '''Zm''',
'''yottametre''': '''Ym''',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'''m''': 0,
'''km''': 3,
'''Mm''': 6,
'''Gm''': 9,
'''Tm''': 12,
'''Pm''': 15,
'''Em''': 18,
'''Zm''': 21,
'''Ym''': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """simple docstring"""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            F"Invalid 'from_type' value: {from_type!r}.\n"
            F"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            F"Invalid 'to_type' value: {to_type!r}.\n"
            F"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
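# Illustrative usage (added; assumes the fixed length_conversion() above):
# length_conversion(4, "METER", "kilometer")  -> 0.004   (exponents 0 and 3, so 4 * 10**-3)
# length_conversion(1, "gigametre", "meter")  -> 1e9     (exponents 9 and 0, so 1 * 10**9)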
| 115
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class lowercase__ ( _UpperCAmelCase ):
a_ ="""codegen"""
a_ ={
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , __UpperCAmelCase=50400 , __UpperCAmelCase=2048 , __UpperCAmelCase=2048 , __UpperCAmelCase=4096 , __UpperCAmelCase=28 , __UpperCAmelCase=16 , __UpperCAmelCase=64 , __UpperCAmelCase=None , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , **__UpperCAmelCase , )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = n_ctx
lowerCAmelCase__ = n_positions
lowerCAmelCase__ = n_embd
lowerCAmelCase__ = n_layer
lowerCAmelCase__ = n_head
lowerCAmelCase__ = n_inner
lowerCAmelCase__ = rotary_dim
lowerCAmelCase__ = activation_function
lowerCAmelCase__ = resid_pdrop
lowerCAmelCase__ = embd_pdrop
lowerCAmelCase__ = attn_pdrop
lowerCAmelCase__ = layer_norm_epsilon
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = use_cache
lowerCAmelCase__ = bos_token_id
lowerCAmelCase__ = eos_token_id
super().__init__(
bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase )
class lowercase__ ( _UpperCAmelCase ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = "default" , __UpperCAmelCase = None , __UpperCAmelCase = False , )-> Any:
'''simple docstring'''
super().__init__(__UpperCAmelCase , task=__UpperCAmelCase , patching_specs=__UpperCAmelCase , use_past=__UpperCAmelCase )
if not getattr(self._config , "pad_token_id" , __UpperCAmelCase ):
# TODO: how to do that better?
lowerCAmelCase__ = 0
@property
def UpperCAmelCase ( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
lowerCAmelCase__ = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction="inputs" )
lowerCAmelCase__ = {0: "batch", 1: "past_sequence + sequence"}
else:
lowerCAmelCase__ = {0: "batch", 1: "sequence"}
return common_inputs
@property
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
return self._config.n_layer
@property
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
return self._config.n_head
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , )-> Mapping[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = super(__UpperCAmelCase , self ).generate_dummy_inputs(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
# We need to order the input in the way they appears in the forward()
lowerCAmelCase__ = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowerCAmelCase__ , lowerCAmelCase__ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
lowerCAmelCase__ = seqlen + 2
lowerCAmelCase__ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCAmelCase__ = [
(torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(self.num_layers )
]
lowerCAmelCase__ = common_inputs["attention_mask"]
if self.use_past:
lowerCAmelCase__ = ordered_inputs["attention_mask"].dtype
lowerCAmelCase__ = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
return 13
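# Illustrative note (added, not part of the original file): the alias dictionary near the top of
# this config ("hidden_size" -> "n_embd", "num_attention_heads" -> "n_head", ...) plays the role
# of attribute_map in the upstream transformers CodeGenConfig, so, assuming that standard
# behaviour, the GPT-style and generic names read the same value:
#   from transformers import CodeGenConfig
#   cfg = CodeGenConfig(n_embd=1024, n_head=16)
#   cfg.hidden_size, cfg.num_attention_heads   # -> (1024, 16)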
| 115
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 83
|
"""simple docstring"""
from torch import nn
def get_activation(act_fn: str):
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'''Unsupported activation function: {act_fn}''' )
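# Illustrative usage (added; assumes the fixed get_activation() above):
# get_activation("silu")   -> nn.SiLU() module instance
# get_activation("gelu")   -> nn.GELU() module instance
# get_activation("tanh")   raises ValueError("Unsupported activation function: tanh")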
| 83
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
_UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_UpperCAmelCase : str = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
_UpperCAmelCase : Optional[Any] = {
'''squeezebert/squeezebert-uncased''': 5_12,
'''squeezebert/squeezebert-mnli''': 5_12,
'''squeezebert/squeezebert-mnli-headless''': 5_12,
}
_UpperCAmelCase : Tuple = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class __magic_name__ ( UpperCamelCase__ ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = SqueezeBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
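# Illustrative usage sketch (added, not part of the original file; uses the real checkpoint ids
# listed in the pretrained maps above):
#   from transformers import SqueezeBertTokenizerFast
#   tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   tok("Hello world")["input_ids"]   # -> [CLS] hello world [SEP] token ids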
| 717
|
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def main() -> None:
    '''simple docstring'''
    message = input('''Enter message: ''')
    key = input('''Enter key [alphanumeric]: ''')
    mode = input('''Encrypt/Decrypt [e/d]: ''')
    if mode.lower().startswith('''e'''):
        mode = '''encrypt'''
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('''d'''):
        mode = '''decrypt'''
        translated = decrypt_message(key, message)
    print(f'\n{mode.title()}ed message:')
    print(translated)
def encrypt_message(key: str, message: str) -> str:
    '''simple docstring'''
    return translate_message(key, message, '''encrypt''')
def decrypt_message(key: str, message: str) -> str:
    '''simple docstring'''
    return translate_message(key, message, '''decrypt''')
def translate_message(key: str, message: str, mode: str) -> str:
    '''simple docstring'''
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
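# Illustrative round-trip check (added; assumes the fixed cipher functions above):
#   message = "Attack at dawn!"
#   assert decrypt_message("LEMON", encrypt_message("LEMON", message)) == message
# Non-letter characters pass through unchanged and the key index only advances on letters.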
| 145
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a ( unittest.TestCase ):
def _UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = 1
_UpperCAmelCase : Any = 3
_UpperCAmelCase : List[Any] = (32, 32)
_UpperCAmelCase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A_ )
return image
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : int = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(A_ )
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
def extract(*A_ , **A_ ):
class a :
def __init__( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = torch.ones([0] )
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
self.pixel_values.to(A_ )
return self
return Out()
return extract
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Union[str, Any] = self.dummy_cond_unet
_UpperCAmelCase : List[str] = PNDMScheduler(skip_prk_steps=A_ )
_UpperCAmelCase : Optional[int] = self.dummy_vae
_UpperCAmelCase : Any = self.dummy_text_encoder
_UpperCAmelCase : Tuple = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
_UpperCAmelCase : Dict = 77
_UpperCAmelCase : Optional[int] = self.dummy_image.to(A_ )
_UpperCAmelCase : List[Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : Tuple = AltDiffusionImgaImgPipeline(
unet=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , safety_checker=A_ , feature_extractor=self.dummy_extractor , )
_UpperCAmelCase : str = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A_ )
_UpperCAmelCase : Dict = alt_pipe.to(A_ )
alt_pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : List[str] = "A painting of a squirrel eating a burger"
_UpperCAmelCase : Optional[int] = torch.Generator(device=A_ ).manual_seed(0 )
_UpperCAmelCase : Optional[int] = alt_pipe(
[prompt] , generator=A_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=A_ , )
_UpperCAmelCase : str = output.images
_UpperCAmelCase : Tuple = torch.Generator(device=A_ ).manual_seed(0 )
_UpperCAmelCase : Dict = alt_pipe(
[prompt] , generator=A_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=A_ , return_dict=A_ , )[0]
_UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
_UpperCAmelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase : int = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : str = self.dummy_cond_unet
_UpperCAmelCase : Any = PNDMScheduler(skip_prk_steps=A_ )
_UpperCAmelCase : Optional[Any] = self.dummy_vae
_UpperCAmelCase : Any = self.dummy_text_encoder
_UpperCAmelCase : Dict = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
_UpperCAmelCase : Any = 77
_UpperCAmelCase : List[Any] = self.dummy_image.to(A_ )
# put models in fp16
_UpperCAmelCase : str = unet.half()
_UpperCAmelCase : Optional[Any] = vae.half()
_UpperCAmelCase : int = bert.half()
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : Optional[int] = AltDiffusionImgaImgPipeline(
unet=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , safety_checker=A_ , feature_extractor=self.dummy_extractor , )
_UpperCAmelCase : int = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A_ )
_UpperCAmelCase : List[str] = alt_pipe.to(A_ )
alt_pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : List[str] = "A painting of a squirrel eating a burger"
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : int = alt_pipe(
[prompt] , generator=A_ , num_inference_steps=2 , output_type="np" , image=A_ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
_UpperCAmelCase : Dict = init_image.resize((760, 504) )
_UpperCAmelCase : str = "BAAI/AltDiffusion"
_UpperCAmelCase : str = AltDiffusionImgaImgPipeline.from_pretrained(
A_ , safety_checker=A_ , )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
_UpperCAmelCase : Union[str, Any] = "A fantasy landscape, trending on artstation"
_UpperCAmelCase : int = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe(
prompt=A_ , image=A_ , strength=0.75 , guidance_scale=7.5 , generator=A_ , output_type="np" , )
_UpperCAmelCase : Union[str, Any] = output.images[0]
_UpperCAmelCase : Optional[int] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
_UpperCAmelCase : Optional[int] = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def _UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
_UpperCAmelCase : Optional[int] = init_image.resize((768, 512) )
_UpperCAmelCase : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
_UpperCAmelCase : int = "BAAI/AltDiffusion"
_UpperCAmelCase : Optional[int] = AltDiffusionImgaImgPipeline.from_pretrained(
A_ , safety_checker=A_ , )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
_UpperCAmelCase : int = "A fantasy landscape, trending on artstation"
_UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
_UpperCAmelCase : int = pipe(
prompt=A_ , image=A_ , strength=0.75 , guidance_scale=7.5 , generator=A_ , output_type="np" , )
_UpperCAmelCase : Union[str, Any] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 300
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE_ = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    # creates a nested list of random floats with the given 2-D shape
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
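# Illustrative usage (added; assumes the fixed floats_list() above):
# floats_list((2, 3)) -> a 2x3 nested list of random floats in [0.0, scale); passing an explicit
# rng, e.g. floats_list((1, 4), rng=random.Random(0)), makes the values reproducible.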
@require_torch
@require_torchaudio
class a ( unittest.TestCase ):
def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=10 , A_=160 , A_=8 , A_=0.0 , A_=4000 , A_=False , A_=True , ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : List[str] = min_seq_length
_UpperCAmelCase : Any = max_seq_length
_UpperCAmelCase : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCAmelCase : Optional[Any] = padding_value
_UpperCAmelCase : Optional[int] = sampling_rate
_UpperCAmelCase : Tuple = return_attention_mask
_UpperCAmelCase : Any = do_normalize
_UpperCAmelCase : Optional[Any] = feature_size
_UpperCAmelCase : int = chunk_length
_UpperCAmelCase : str = hop_length
def _UpperCAmelCase ( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _UpperCAmelCase ( self , A_=False , A_=False ):
'''simple docstring'''
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
_UpperCAmelCase : Dict = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_UpperCAmelCase : List[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_UpperCAmelCase : Union[str, Any] = [np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class a ( UpperCAmelCase , unittest.TestCase ):
_lowercase = WhisperFeatureExtractor if is_speech_available() else None
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Dict = WhisperFeatureExtractionTester(self )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : Tuple = feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
_UpperCAmelCase : Any = self.feature_extraction_class.from_pretrained(A_ )
_UpperCAmelCase : Union[str, Any] = feat_extract_first.to_dict()
_UpperCAmelCase : List[str] = feat_extract_second.to_dict()
_UpperCAmelCase : Optional[int] = feat_extract_first.mel_filters
_UpperCAmelCase : Tuple = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : int = os.path.join(A_ , "feat_extract.json" )
feat_extract_first.to_json_file(A_ )
_UpperCAmelCase : List[str] = self.feature_extraction_class.from_json_file(A_ )
_UpperCAmelCase : List[str] = feat_extract_first.to_dict()
_UpperCAmelCase : Any = feat_extract_second.to_dict()
_UpperCAmelCase : Any = feat_extract_first.mel_filters
_UpperCAmelCase : List[str] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCAmelCase : Tuple = [np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
_UpperCAmelCase : str = feature_extractor(A_ , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_UpperCAmelCase : str = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_UpperCAmelCase : str = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test batched
_UpperCAmelCase : Tuple = feature_extractor(A_ , return_tensors="np" ).input_features
_UpperCAmelCase : List[str] = feature_extractor(A_ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCAmelCase : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCAmelCase : Optional[int] = np.asarray(A_ )
_UpperCAmelCase : List[Any] = feature_extractor(A_ , return_tensors="np" ).input_features
_UpperCAmelCase : Union[str, Any] = feature_extractor(A_ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test truncation required
_UpperCAmelCase : List[str] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_UpperCAmelCase : Tuple = [np.asarray(A_ ) for speech_input in speech_inputs]
_UpperCAmelCase : int = [x[: feature_extractor.n_samples] for x in speech_inputs]
_UpperCAmelCase : Optional[int] = [np.asarray(A_ ) for speech_input in speech_inputs_truncated]
_UpperCAmelCase : int = feature_extractor(A_ , return_tensors="np" ).input_features
_UpperCAmelCase : str = feature_extractor(A_ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
def _UpperCAmelCase ( self ):
'''simple docstring'''
import torch
_UpperCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : int = np.random.rand(100 , 32 ).astype(np.floataa )
_UpperCAmelCase : Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase : str = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_UpperCAmelCase : Optional[Any] = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : List[str] = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_UpperCAmelCase : Union[str, Any] = ds.sort("id" ).select(range(A_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = torch.tensor(
[
0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51,
0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78,
0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54,
-0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54
] )
# fmt: on
_UpperCAmelCase : Union[str, Any] = self._load_datasamples(1 )
_UpperCAmelCase : Optional[int] = WhisperFeatureExtractor()
_UpperCAmelCase : Any = feature_extractor(A_ , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1e-4 ) )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : Optional[Any] = self._load_datasamples(1 )[0]
_UpperCAmelCase : Union[str, Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
_UpperCAmelCase : List[str] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0]
self.assertTrue(np.all(np.mean(A_ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1e-3 ) )
| 300
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :Union[str, Any] = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class A_ ( __lowercase , __lowercase ):
_lowerCamelCase : List[str] = 'resnet'
_lowerCamelCase : Union[str, Any] = ['basic', 'bottleneck']
def __init__( self : Optional[Any] , snake_case_ : List[str]=3 , snake_case_ : int=6_4 , snake_case_ : Optional[int]=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , snake_case_ : int=[3, 4, 6, 3] , snake_case_ : str="bottleneck" , snake_case_ : Optional[int]="relu" , snake_case_ : Union[str, Any]=False , snake_case_ : Dict=None , snake_case_ : Union[str, Any]=None , **snake_case_ : Optional[Any] , ):
super().__init__(**__A )
if layer_type not in self.layer_types:
raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
_UpperCAmelCase = num_channels
_UpperCAmelCase = embedding_size
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = layer_type
_UpperCAmelCase = hidden_act
_UpperCAmelCase = downsample_in_first_stage
_UpperCAmelCase = ["stem"] + [f'stage{idx}' for idx in range(1 , len(__A ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=__A , out_indices=__A , stage_names=self.stage_names )
class A_ ( __lowercase ):
_lowerCamelCase : Tuple = version.parse("""1.11""" )
@property
def lowercase ( self : List[Any] ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase ( self : Optional[int] ):
return 1e-3
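# Illustrative usage sketch (added, not part of the original file; assumes the standard
# transformers ResNetConfig API that this file mirrors):
#   from transformers import ResNetConfig
#   cfg = ResNetConfig(layer_type="basic", depths=[2, 2, 2, 2])   # ResNet-18-style layout
#   ResNetConfig(layer_type="bad") raises ValueError, matching the layer_types check above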
| 714
|
'''simple docstring'''
from math import ceil
def assert_device_map(device_map: Any, num_blocks: int) -> Any:
    '''simple docstring'''
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks))
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks))
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks))
def get_device_map(n_layers: int, devices: List[Any]) -> Any:
    '''simple docstring'''
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
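# Illustrative usage (added; assumes the fixed helpers above):
# get_device_map(12, [0, 1, 2]) -> {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9, 10, 11]}
# assert_device_map(get_device_map(12, [0, 1, 2]), 12) passes silently; a map that repeats or
# omits an attention block raises ValueError with the messages defined above.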
| 119
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 221
|
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
SCREAMING_SNAKE_CASE_: Dict =[
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
SCREAMING_SNAKE_CASE_: List[Any] =[
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
SCREAMING_SNAKE_CASE_: Any =f"down_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: Tuple =f"input_blocks.{3*i + j + 1}.0."
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
SCREAMING_SNAKE_CASE_: Optional[Any] =f"down_blocks.{i}.attentions.{j}."
SCREAMING_SNAKE_CASE_: List[str] =f"input_blocks.{3*i + j + 1}.1."
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
SCREAMING_SNAKE_CASE_: Union[str, Any] =f"up_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: Any =f"output_blocks.{3*i + j}.0."
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.attentions.{j}."
SCREAMING_SNAKE_CASE_: Optional[int] =f"output_blocks.{3*i + j}.1."
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
SCREAMING_SNAKE_CASE_: Union[str, Any] =f"down_blocks.{i}.downsamplers.0.conv."
SCREAMING_SNAKE_CASE_: Union[str, Any] =f"input_blocks.{3*(i+1)}.0.op."
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.upsamplers.0."
SCREAMING_SNAKE_CASE_: List[Any] =f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
SCREAMING_SNAKE_CASE_: int ='mid_block.attentions.0.'
SCREAMING_SNAKE_CASE_: List[Any] ='middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
SCREAMING_SNAKE_CASE_: Tuple =f"mid_block.resnets.{j}."
SCREAMING_SNAKE_CASE_: Tuple =f"middle_block.{2*j}."
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
UpperCAmelCase_ = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
UpperCAmelCase_ = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
SCREAMING_SNAKE_CASE_: int =[
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
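# Note: in the original SD checkpoint the VAE mid-block attention q/k/v/proj_out weights are stored as
# 1x1 convolutions (4-D tensors), while Diffusers stores them as linear layers (2-D tensors), so the
# affected weights are reshaped back with reshape_weight_for_sd() during conversion below.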
def reshape_weight_for_sd(w):
    """Convert a 2-D HF linear weight back to the 4-D conv weight stable-diffusion expects."""
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    """Rename HF Diffusers VAE keys back to the original stable-diffusion layout."""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('|'.join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {'q': 0, 'k': 1, 'v': 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    """Convert an HF CLIP text encoder state dict to the OpenCLIP layout used by SD v2.x."""
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    """SD v1.x uses the same CLIP layout as HF, so no renaming is needed."""
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
    parser.add_argument(
        '--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
    vae_path = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
    text_enc_path = osp.join(args.model_path, 'text_encoder', 'model.safetensors')

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device='cpu')
    else:
        unet_path = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
        unet_state_dict = torch.load(unet_path, map_location='cpu')

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device='cpu')
    else:
        vae_path = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
        vae_state_dict = torch.load(vae_path, map_location='cpu')

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device='cpu')
    else:
        text_enc_path = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
        text_enc_dict = torch.load(text_enc_path, map_location='cpu')

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {'transformer.' + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {'state_dict': state_dict}
        torch.save(state_dict, args.checkpoint_path)
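# Illustrative invocation (the script file name and paths below are placeholders, not part of the original code):
#   python convert_diffusers_to_sd.py --model_path ./my-diffusers-model \
#       --checkpoint_path ./model.safetensors --use_safetensors --half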
| 78
| 0
|
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowercase__ : List[Any] = False
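# The test below checks that DDPMScheduler and DDIMScheduler yield identical noisy samples and model
# predictions over a short training loop, i.e. that add_noise() behaves equivalently for both schedulers.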
class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
| 485
|
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
lowercase__ : List[str] = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
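# Maps key fragments of the original SAM checkpoint to the module names used by the Hugging Face SamModel;
# replace_keys() below applies these as plain substring replacements.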
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(
            vision_config=vision_config,
        )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(
            vision_config=vision_config,
        )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 485
| 1
|
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 392
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : Optional[Any] = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
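# The helper below rewrites individual checkpoint key names into the naming scheme expected by the
# Hugging Face MobileViT implementation (e.g. "layer_1." -> "encoder.layer.0.", "conv_1." -> "conv_stem.").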
def lowercase ( _lowerCAmelCase , _lowerCAmelCase=False ):
for i in range(1 , 6 ):
if F'''layer_{i}.''' in name:
UpperCAmelCase__ = name.replace(F'''layer_{i}.''' , F'''encoder.layer.{i - 1}.''' )
if "conv_1." in name:
UpperCAmelCase__ = name.replace("""conv_1.""" , """conv_stem.""" )
if ".block." in name:
UpperCAmelCase__ = name.replace(""".block.""" , """.""" )
if "exp_1x1" in name:
UpperCAmelCase__ = name.replace("""exp_1x1""" , """expand_1x1""" )
if "red_1x1" in name:
UpperCAmelCase__ = name.replace("""red_1x1""" , """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
UpperCAmelCase__ = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
UpperCAmelCase__ = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
if ".norm." in name:
UpperCAmelCase__ = name.replace(""".norm.""" , """.normalization.""" )
if ".conv." in name:
UpperCAmelCase__ = name.replace(""".conv.""" , """.convolution.""" )
if ".conv_proj." in name:
UpperCAmelCase__ = name.replace(""".conv_proj.""" , """.conv_projection.""" )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F'''.{i}.{j}.''' in name:
UpperCAmelCase__ = name.replace(F'''.{i}.{j}.''' , F'''.{i}.layer.{j}.''' )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F'''.{i}.{j}.''' in name:
UpperCAmelCase__ = name.replace(F'''.{i}.{j}.''' , F'''.{i}.''' )
if "expand_1x1" in name:
UpperCAmelCase__ = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
UpperCAmelCase__ = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
UpperCAmelCase__ = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
for i in range(2 , 5 ):
if F'''.global_rep.{i}.weight''' in name:
UpperCAmelCase__ = name.replace(F'''.global_rep.{i}.weight''' , """.layernorm.weight""" )
if F'''.global_rep.{i}.bias''' in name:
UpperCAmelCase__ = name.replace(F'''.global_rep.{i}.bias''' , """.layernorm.bias""" )
if ".global_rep." in name:
UpperCAmelCase__ = name.replace(""".global_rep.""" , """.transformer.""" )
if ".pre_norm_mha.0." in name:
UpperCAmelCase__ = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
UpperCAmelCase__ = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
UpperCAmelCase__ = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
UpperCAmelCase__ = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
UpperCAmelCase__ = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
if ".transformer." in name:
UpperCAmelCase__ = name.replace(""".transformer.""" , """.transformer.layer.""" )
if ".aspp_layer." in name:
UpperCAmelCase__ = name.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in name:
UpperCAmelCase__ = name.replace(""".aspp_pool.""" , """.""" )
if "seg_head." in name:
UpperCAmelCase__ = name.replace("""seg_head.""" , """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
UpperCAmelCase__ = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
if "classifier.fc." in name:
UpperCAmelCase__ = name.replace("""classifier.fc.""" , """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
UpperCAmelCase__ = """mobilevit.""" + name
return name
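# convert_state_dict() below walks the original state dict and, for fused "qkv" attention weights, splits
# each tensor into the separate query/key/value slices expected by the Hugging Face model.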
def lowercase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ):
if base_model:
UpperCAmelCase__ = """"""
else:
UpperCAmelCase__ = """mobilevit."""
for key in orig_state_dict.copy().keys():
UpperCAmelCase__ = orig_state_dict.pop(_lowerCAmelCase )
if key[:8] == "encoder.":
UpperCAmelCase__ = key[8:]
if "qkv" in key:
UpperCAmelCase__ = key.split(""".""" )
UpperCAmelCase__ = int(key_split[0][6:] ) - 1
UpperCAmelCase__ = int(key_split[3] )
UpperCAmelCase__ = model.get_submodule(F'''{model_prefix}encoder.layer.{layer_num}''' )
UpperCAmelCase__ = layer.transformer.layer[transformer_num].attention.attention.all_head_size
UpperCAmelCase__ = (
F'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''
)
if "weight" in key:
UpperCAmelCase__ = val[:dim, :]
UpperCAmelCase__ = val[dim : dim * 2, :]
UpperCAmelCase__ = val[-dim:, :]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[dim : dim * 2]
UpperCAmelCase__ = val[-dim:]
else:
UpperCAmelCase__ = val
return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowercase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ):
UpperCAmelCase__ = get_mobilevit_config(_lowerCAmelCase )
# load original state_dict
UpperCAmelCase__ = torch.load(_lowerCAmelCase , map_location="""cpu""" )
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_""" ):
UpperCAmelCase__ = MobileViTForSemanticSegmentation(_lowerCAmelCase ).eval()
else:
UpperCAmelCase__ = MobileViTForImageClassification(_lowerCAmelCase ).eval()
UpperCAmelCase__ = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase__ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
UpperCAmelCase__ = image_processor(images=prepare_img() , return_tensors="""pt""" )
UpperCAmelCase__ = model(**_lowerCAmelCase )
UpperCAmelCase__ = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
UpperCAmelCase__ = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
UpperCAmelCase__ = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
UpperCAmelCase__ = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3, :3, :3] , _lowerCAmelCase , atol=1e-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
UpperCAmelCase__ = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
UpperCAmelCase__ = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
UpperCAmelCase__ = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3] , _lowerCAmelCase , atol=1e-4 )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(F'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
UpperCAmelCase__ = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
UpperCAmelCase__ = model_mapping[mobilevit_name]
image_processor.push_to_hub(_lowerCAmelCase , organization="""apple""" )
model.push_to_hub(_lowerCAmelCase , organization="""apple""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 392
| 1
|
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data) -> None:
        self.data = data
        self.right = None
        self.left = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")
def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
    node = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 5_0 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
| 262
|
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Return True if the given side lengths can form a polygon, i.e. the longest
    side is strictly shorter than the sum of all the other sides."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
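# Example (hypothetical REPL session):
#   >>> check_polygon([6, 10, 5])
#   True
#   >>> check_polygon([3, 7, 13, 2])
#   False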
if __name__ == "__main__":
import doctest
doctest.testmod()
| 262
| 1
|