| code (string, 82–53.2k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
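Each row below pairs a `code` sample with a `style_context` sample; the bare numeric cells between samples carry that row's `code_codestyle` and `style_context_codestyle` indices and its binary `label`.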
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["target_spm"])
        # Round-trip through save_pretrained so later from_pretrained calls see a complete tokenizer dir.
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()
        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__UpperCAmelCase : Optional[int] = {"""input_ids""": [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCAmelCase,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"
        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
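For orientation, a minimal sketch of the source/target split these tests exercise (the German sentence is illustrative; assumes the Helsinki-NLP/opus-mt-en-de checkpoint is reachable):

from transformers import MarianTokenizer

tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
src_ids = tok("I am a small frog").input_ids                       # encoded with source.spm
tgt_ids = tok(text_target="Ich bin ein kleiner Frosch").input_ids  # encoded with target.spm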
| code_codestyle: 63 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"

REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Rewrite the version in `fname` using the regex registered for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Point model-doc links in the README at the stable docs instead of `main`."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
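The bump rules above are plain arithmetic on the parsed version; a minimal sketch of the packaging.version fields they rely on (version strings illustrative):

from packaging import version

v = version.parse("4.28.0.dev0")
print(v.is_devrelease, v.base_version)       # True 4.28.0 -> a dev branch releases its base version
v = version.parse("4.28.0")
print(f"{v.major}.{v.minor}.{v.micro + 1}")  # 4.28.1 -> patch release
print(f"{v.major}.{v.minor + 1}.0")          # 4.29.0 -> minor release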
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| style_context_codestyle: 50 | label: 0 |
class Graph:
    """Undirected weighted graph stored as an adjacency dictionary."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct so the cheapest edge per component is unique."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))  # drop the reversed duplicate of each edge
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
    class UnionFind:
        """Disjoint-set with path compression and union by rank (nested: boruvka_mst calls Graph.UnionFind)."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])  # path compression
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None
    @staticmethod
    def boruvka_mst(graph):
        """Build a minimum spanning tree with Borůvka's algorithm (weights must be distinct)."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))  # drop reversed duplicates
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    # track the cheapest edge leaving each component
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
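A quick usage sketch of the Graph class above (graph contents illustrative):

g = Graph.build(vertices=[0, 1, 2, 3], edges=[(0, 1, 1), (0, 2, 2), (2, 3, 3)])
g.distinct_weight()           # guarantee a unique cheapest edge per component
mst = Graph.boruvka_mst(g)
print(mst)                    # each MST edge printed as "head -> tail == weight"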
| code_codestyle: 701 |
def sum_of_digits(n: int) -> int:
    """Iterative digit sum: sum_of_digits(12345) == 15."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursive digit sum."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Digit sum via string conversion."""
    return sum(int(c) for c in str(abs(n)))
def benchmark() -> None:
    """Time each implementation on increasingly large inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| style_context_codestyle: 208 | label: 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
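A minimal usage sketch of the auto classes defined above (assumes Flax and the bert-base-uncased weights are available):

from transformers import AutoTokenizer, FlaxAutoModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = FlaxAutoModel.from_pretrained("bert-base-uncased")
outputs = model(**tokenizer("Hello world", return_tensors="np"))
print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)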
| code_codestyle: 441 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Group answers by question, then compute per-question exact match and macro-F1."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
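A tiny worked example of the grouping above (two answers for one question, both predicted correctly):

ids_preds = [
    {"idx": {"paragraph": 0, "question": 0, "answer": 0}, "prediction": 1},
    {"idx": {"paragraph": 0, "question": 0, "answer": 1}, "prediction": 0},
]
labels = [1, 0]
print(evaluate_multirc(ids_preds, labels))  # {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}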
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )
    def _get_feature_types(self):
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| style_context_codestyle: 233 | label: 0 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
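The factory above is what the CLI argument parser calls; a minimal sketch of exercising the command object directly (bypassing the CLI wiring):

cmd = EnvironmentCommand()
info = cmd.run()  # prints the environment report and returns the underlying dict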
| code_codestyle: 195 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # Attend-and-excite needs determinism so the expected slices compare reliably.
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| style_context_codestyle: 195 | label: 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[: config.hidden_sizes[i], :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[config.hidden_sizes[i] :, :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
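The slicing above splits a fused [K; V] projection in half along the output dimension; a self-contained sketch of the same split (sizes illustrative):

import torch

hidden = 64
kv_weight = torch.randn(2 * hidden, hidden)  # fused key/value projection, keys stacked first
k_weight, v_weight = kv_weight[:hidden, :], kv_weight[hidden:, :]
kv_bias = torch.randn(2 * hidden)
k_bias, v_bias = kv_bias[:hidden], kv_bias[hidden:]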
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| code_codestyle: 57 |
from typing import Callable, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Zero out (in log space) probabilities outside the `truncation_rate` nucleus of `log_p_x_0`."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
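

# A small, self-contained sketch of the truncation step above, run on a toy
# distribution so the effect is visible without loading the full pipeline.
# `demo_truncate` is our illustrative helper, not part of diffusers; it mirrors
# `VQDiffusionPipeline.truncate` on a (batch, classes, pixels) log-prob tensor.
def demo_truncate(log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
    sorted_log_p, indices = torch.sort(log_p_x_0, 1, descending=True)
    keep_mask = torch.exp(sorted_log_p).cumsum(dim=1) < truncation_rate
    # always keep at least the single most likely class
    all_true = torch.full_like(keep_mask[:, 0:1, :], True)
    keep_mask = torch.cat((all_true, keep_mask), dim=1)[:, :-1, :]
    keep_mask = keep_mask.gather(1, indices.argsort(1))
    out = log_p_x_0.clone()
    out[~keep_mask] = -torch.inf
    return out


if __name__ == "__main__":
    # probabilities 0.6 / 0.3 / 0.1 for a single latent pixel; with
    # truncation_rate=0.8 only the two most likely classes survive.
    probs = torch.tensor([[[0.6], [0.3], [0.1]]])
    print(demo_truncate(probs.log(), truncation_rate=0.8).exp())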
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
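

# Illustration only: the no_trainer scripts write an `all_results.json` into
# their output directory, and `get_results` just loads it. The helper below is
# a hypothetical self-check we add for clarity, not part of the original tests.
def _demo_get_results():
    demo_dir = tempfile.mkdtemp()
    with open(os.path.join(demo_dir, "all_results.json"), "w") as f:
        json.dump({"eval_accuracy": 0.75}, f)
    assert get_results(demo_dir) == {"eval_accuracy": 0.75}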
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
@slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu, run_time=1),
            Create(gpu, run_time=1),
            Create(model, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
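

if __name__ == "__main__":
    # A minimal programmatic render (our sketch, assuming the manim community
    # edition API): equivalent to running `manim -ql <this_file>.py Stage1`
    # from the command line, where -ql requests a fast low-quality preview.
    from manim import tempconfig

    with tempconfig({"quality": "low_quality"}):
        Stage1().render()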
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id="""test-model-flax""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
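

# A tiny self-contained sanity check for `check_models_equal` (our addition,
# not part of the original test file): a model must compare equal to itself.
def _demo_check_models_equal():
    config = BertConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
    )
    model = FlaxBertModel(config)
    assert check_models_equal(model, model)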
"""simple docstring"""
def UpperCamelCase ( _A , _A ) -> int:
lowercase : int = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
lowercase : List[Any] = n - k
# Calculate C(n,k)
for i in range(_A ):
result *= n - i
result //= i + 1
return result
def UpperCamelCase ( _A ) -> int:
return binomial_coefficient(2 * node_count , _A ) // (node_count + 1)
def UpperCamelCase ( _A ) -> int:
if n < 0:
raise ValueError("""factorial() not defined for negative values""" )
lowercase : Union[str, Any] = 1
for i in range(1 , n + 1 ):
result *= i
return result
def UpperCamelCase ( _A ) -> int:
return catalan_number(_A ) * factorial(_A )
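

# Worked example (our addition): with node_count = 5, binomial_coefficient(10, 5)
# is 252, so catalan_number(5) = 252 // 6 = 42 distinct binary search trees, and
# binary_tree_count(5) = 42 * 5! = 5040 distinct binary trees on 5 labeled nodes.
def _demo_counts():
    assert binomial_coefficient(10, 5) == 252
    assert catalan_number(5) == 42
    assert binary_tree_count(5) == 5040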
if __name__ == "__main__":
    node_count = int(input('Enter the number of nodes: ').strip() or 0)
if node_count <= 0:
raise ValueError('We need some nodes to work with.')
print(
F'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
F'binary trees and {catalan_number(node_count)} binary search trees.'
)
import argparse
import struct
import unittest
class SHA256:
    """Class to contain the entire pipeline for SHA-256 hashing."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6a_09e_667,
0xbb_67a_e85,
0x3c_6ef_372,
0xa5_4ff_53a,
0x51_0e5_27f,
0x9b_056_88c,
0x1f_83d_9ab,
0x5b_e0c_d19,
]
# Initialize round constants
        self.round_constants = [
0x42_8a2_f98,
0x71_374_491,
0xb5_c0f_bcf,
0xe9_b5d_ba5,
0x39_56c_25b,
0x59_f11_1f1,
0x92_3f8_2a4,
0xab_1c5_ed5,
0xd8_07a_a98,
0x12_835_b01,
0x24_318_5be,
0x55_0c7_dc3,
0x72_be5_d74,
0x80_deb_1fe,
0x9b_dc0_6a7,
0xc1_9bf_174,
0xe4_9b6_9c1,
0xef_be4_786,
0x0f_c19_dc6,
0x24_0ca_1cc,
0x2d_e92_c6f,
0x4a_748_4aa,
0x5c_b0a_9dc,
0x76_f98_8da,
0x98_3e5_152,
0xa8_31c_66d,
0xb0_032_7c8,
0xbf_597_fc7,
0xc6_e00_bf3,
0xd5_a79_147,
0x06_ca6_351,
0x14_292_967,
0x27_b70_a85,
0x2e_1b2_138,
0x4d_2c6_dfc,
0x53_380_d13,
0x65_0a7_354,
0x76_6a0_abb,
0x81_c2c_92e,
0x92_722_c85,
0xa2_bfe_8a1,
0xa8_1a6_64b,
0xc2_4b8_b70,
0xc7_6c5_1a3,
0xd1_92e_819,
0xd6_990_624,
0xf4_0e3_585,
0x10_6aa_070,
0x19_a4c_116,
0x1e_376_c08,
0x27_487_74c,
0x34_b0b_cb5,
0x39_1c0_cb3,
0x4e_d8a_a4a,
0x5b_9cc_a4f,
0x68_2e6_ff3,
0x74_8f8_2ee,
0x78_a56_36f,
0x84_c87_814,
0x8c_c70_208,
0x90_bef_ffa,
0xa4_506_ceb,
0xbe_f9a_3f7,
0xc6_717_8f2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 zero-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer `value` by `rotations` bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    """Provide the option to hash a string or the contents of a file with SHA-256."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
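

# Quick cross-check against the standard library (our addition, mirroring the
# unit test above): the pure-Python implementation must agree with hashlib.
def _demo_sha256_cross_check():
    import hashlib

    data = b"abc"
    assert SHA256(data).hash == hashlib.sha256(data).hexdigest()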
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
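

# The core property the past-key-values test checks, in isolation (our hedged
# sketch with a tiny randomly initialized decoder, reusing the test's atol=1e-3):
# decoding one step with `past_key_values` must reproduce the hidden state of a
# full-sequence forward pass at the same position.
def _demo_cached_decoding_equivalence():
    config = TrOCRConfig(vocab_size=99, d_model=16, decoder_layers=2, decoder_attention_heads=4, decoder_ffn_dim=32)
    model = TrOCRDecoder(config).eval()
    input_ids = torch.randint(1, 99, (1, 5))
    full = model(input_ids).last_hidden_state
    past = model(input_ids[:, :-1], use_cache=True).past_key_values
    step = model(input_ids[:, -1:], past_key_values=past).last_hidden_state
    assert torch.allclose(full[:, -1], step[:, 0], atol=1e-3)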
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" )
def a_ ( self ):
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def a_ ( self , a__ ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([1, 10] , config.vocab_size )
__SCREAMING_SNAKE_CASE : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__SCREAMING_SNAKE_CASE : List[Any] = OpenLlamaModel(a__ )
original_model.to(a__ )
original_model.eval()
__SCREAMING_SNAKE_CASE : int = original_model(a__ ).last_hidden_state
__SCREAMING_SNAKE_CASE : Optional[Any] = original_model(a__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__SCREAMING_SNAKE_CASE : str = {"type": scaling_type, "factor": 10.0}
__SCREAMING_SNAKE_CASE : Dict = OpenLlamaModel(a__ )
scaled_model.to(a__ )
scaled_model.eval()
__SCREAMING_SNAKE_CASE : int = scaled_model(a__ ).last_hidden_state
__SCREAMING_SNAKE_CASE : Dict = scaled_model(a__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a__ , a__ , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(a__ , a__ , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a__ , a__ , atol=1e-5 ) )
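

# For reference (our sketch, reusing names from the test above): the RoPE
# scaling configuration exercised by `test_model_rope_scaling` is a plain dict
# on the config, with "type" in {"linear", "dynamic"} and a float "factor" > 1
# that stretches the usable context length.
def _demo_rope_scaling_config(scaling_type: str = "linear"):
    config = OpenLlamaConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
    )
    config.rope_scaling = {"type": scaling_type, "factor": 10.0}
    return config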
def base16_encode(data: bytes) -> str:
    """Encode `data` to an uppercase hexadecimal (base16) string."""
    # Turn each byte into its two-digit uppercase hex representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 string back to bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
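

def _demo_base16_round_trip():
    # Round-trip check (our addition, not in the original module): encoding
    # then decoding must return the original bytes, and each byte maps to two
    # uppercase hex digits.
    assert base16_encode(b"\x00\xff") == "00FF"
    assert base16_decode(base16_encode(b"Hello World!")) == b"Hello World!"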
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def __lowercase ( _a , _a , _a , _a , _a , _a ):
# prepare kernel
# the kernel size have to be odd
if (ksize % 2) == 0:
snake_case_ : List[str] = ksize + 1
snake_case_ : Optional[int] = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(_a ):
for x in range(_a ):
# distance from center
snake_case_ : Any = x - ksize // 2
snake_case_ : List[Any] = y - ksize // 2
# degree to radiant
snake_case_ : List[Any] = theta / 180 * np.pi
snake_case_ : Dict = np.cos(_theta )
snake_case_ : Optional[int] = np.sin(_theta )
# get kernel x
snake_case_ : Tuple = cos_theta * px + sin_theta * py
# get kernel y
snake_case_ : Union[str, Any] = -sin_theta * px + cos_theta * py
# fill kernel
snake_case_ : Any = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
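

# The loop above fills the real-valued Gabor function
#   g(x', y') = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2*pi*x'/lambd + psi)
# where (x', y') are the pixel coordinates rotated by `theta` degrees. A tiny
# self-check (our addition): the center pixel is always exp(0) * cos(psi).
def _demo_gabor_center():
    kernel = gabor_filter_kernel(9, 8, theta=45, lambd=10, gamma=1, psi=0)
    assert kernel[4, 4] == 1.0  # exp(0) * cos(0)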
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
lowercase__ : List[Any] = imread('''../image_data/lena.jpg''')
# turn image in gray scale value
lowercase__ : int = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
lowercase__ : Optional[Any] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
lowercase__ : Dict = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
lowercase__ : Any = out / out.max() * 2_55
lowercase__ : Tuple = out.astype(np.uinta)
imshow('''Original''', gray)
imshow('''Gabor filter with 20x20 mask and 6 directions''', out)
waitKey(0)
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."})
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."})
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )
    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        }, )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"})
    config_overrides: Optional[str] = field(
        default=None, metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        }, )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"})
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."})
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."})
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."})
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def __lowercase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
    if model_args.config_overrides is not None:
        logger.info(f"Overriding config: {model_args.config_overrides}")
        config.update_from_string(model_args.config_overrides)
        logger.info(f"New config: {config}")
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()
# create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
snake_case_ : str = image_processor.size['''shortest_edge''']
else:
snake_case_ : Dict = (image_processor.size['''height'''], image_processor.size['''width'''])
snake_case_ : str = Compose(
[
Lambda(lambda _a : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(_a , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(_a ):
snake_case_ : Tuple = [transforms(_a ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=ds["train"] if training_args.do_train else None, eval_dataset=ds["validation"] if training_args.do_eval else None, tokenizer=image_processor, data_collator=collate_fn, )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
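# Illustrative invocation (not from the original file; a sketch only — flags follow
# the dataclasses defined above):
#
#     python run_mae.py \
#         --dataset_name cifar10 \
#         --output_dir ./vit-mae-demo \
#         --do_train --do_eval \
#         --base_learning_rate 1.5e-4 \
#         --mask_ratio 0.75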
| 485
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]
    def __init__(self, feature_size: int = 1, sampling_rate: int = 24_000, padding_value: float = 0.0, chunk_length_s: float = None, overlap: float = None, **kwargs, ) -> None:
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    def __call__(self, raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Optional[Union[bool, str, PaddingStrategy]] = None, truncation: Optional[bool] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list))) )
        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]
# verify inputs are valid
        for idx, example in enumerate(raw_audio):
if example.ndim > 2:
raise ValueError(f"""Expected input shape (channels, length) but got shape {example.shape}""" )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f"""Expected mono audio but example has {example.shape[-1]} channels""" )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f"""Expected stereo audio but example has {example.shape[-1]} channels""" )
        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values
        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding, )
        if padding:
            padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")
        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
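# Illustrative usage sketch (not from the original file): one second of mono audio
# through the extractor. As a worked example of the chunk arithmetic above, with
# chunk_length_s=1.0 and overlap=0.01 at 24 kHz, chunk_length = 24000 samples and
# chunk_stride = max(1, int((1.0 - 0.01) * 24000)) = 23760 samples.
#
#     import numpy as np
#     extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
#     audio = np.zeros(24_000, dtype=np.float32)
#     features = extractor(audio, sampling_rate=24_000, return_tensors="np")
#     print(features["input_values"].shape)  # (1, 1, 24000)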
| 264
|
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1
    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis functions must sum up to 1 to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values
    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of the i-th basis function and the i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree), )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
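# Worked check (illustrative, not from the original file): for the degree-1 curve
# through (1, 2) and (3, 5), the point at t = 0.5 is the midpoint, since
# B(t) = (1 - t) * P0 + t * P1.
#
#     curve = BezierCurve([(1, 2), (3, 5)])
#     assert curve.basis_function(0.5) == [0.5, 0.5]
#     assert curve.bezier_curve_function(0.5) == (2.0, 3.5)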
| 264
| 1
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'gpt2': 1024,
    'gpt2-medium': 1024,
    'gpt2-large': 1024,
    'gpt2-xl': 1024,
    'distilgpt2': 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs, ) -> None:
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        self.add_bos_token = kwargs.pop("add_bos_token", False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
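# Illustrative usage sketch (not part of this source file): the fast tokenizer
# round-trips text through the BPE vocab.
#
#     from transformers import GPT2TokenizerFast
#     tok = GPT2TokenizerFast.from_pretrained("gpt2")
#     ids = tok("Hello world")["input_ids"]
#     assert tok.decode(ids) == "Hello world"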
| 717
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor
class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True, ) -> None:
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, )
                for d in range(num_layers)
            ] )
        self.proj_out = nn.Linear(inner_dim, in_channels)
    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True, ):
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, )
        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
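# Shape walk-through (illustrative, not from the original file): the forward pass
# folds video frames into the batch axis, so each pixel position becomes a sequence
# of length F that the transformer blocks attend over.
#
#     import torch
#     x = torch.randn(2 * 4, 32, 8, 8)  # B=2 videos, F=4 frames, C=32, H=W=8
#     y = x[None, :].reshape(2, 4, 32, 8, 8).permute(0, 2, 1, 3, 4)   # (B, C, F, H, W)
#     y = y.permute(0, 3, 4, 2, 1).reshape(2 * 8 * 8, 4, 32)          # (B*H*W, F, C)
#     print(y.shape)  # torch.Size([128, 4, 32])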
| 39
| 0
|
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Yields prime numbers in order, using an incremental sieve with a factor map."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution(limit: float = 1e10) -> int:
    """Returns the least n for which the remainder first exceeds the limit."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
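# Quick check (illustrative, not from the original file): the incremental sieve
# yields primes in order without a fixed upper bound.
#
#     primes = sieve()
#     assert [next(primes) for _ in range(6)] == [2, 3, 5, 7, 11, 13]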
| 43
|
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start to be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
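# Worked example (illustrative, not from the original file):
#
#     prime_sieve(30)  # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]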
| 364
| 0
|
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """simple docstring"""
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels
    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template) )
        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """simple docstring"""
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`" )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=truncation, )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=TruncationStrategy.DO_NOT_TRUNCATE, )
            else:
                raise e
        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers." )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs, ):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")
        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)
        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))
        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
        top_inds = list(reversed(scores[0].argsort()))
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
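# Illustrative usage sketch (not part of this source file): the pipeline scores each
# (sequence, hypothesis) pair with an NLI model and normalizes the entailment logits
# over the candidate labels.
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#     result = classifier("I love hiking in the Alps", candidate_labels=["travel", "cooking", "finance"])
#     print(result["labels"][0])  # most likely: "travel"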
| 700
|
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero(x: tuple) -> str:
    return x[0]
def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)
def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
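# Illustrative example (not from the original file): the match score counts how many
# of the six most and six least frequent letters line up with English expectations.
#
#     sample = "Ethics are built right into the ideals and objectives of the United Nations"
#     print(english_freq_match_score(sample))  # an integer from 0 to 12; higher = more English-like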
| 299
| 0
|
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Elementwise logistic function 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))
def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid approximation of the GELU activation: x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
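# Worked values (illustrative, not from the original file): sigmoid(0) = 0.5, so the
# approximation x * sigmoid(1.702 * x) gives 0 at x = 0 and approaches x for large x.
#
#     print(sigmoid(np.array([0.0])))                           # [0.5]
#     print(gaussian_error_linear_unit(np.array([0.0, 3.0])))   # [0.0, ~2.98]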
| 34
|
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])
    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc
    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2
    @property
    def stddev(self):
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map
    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function) -> None:
        super().__init__()
        self.function = function
    def forward(self, x, *args) -> torch.Tensor:
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]
    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)
    def distribution(self, distr_args, loc=None, scale=None, ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)
    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)
    @property
    def event_dim(self) -> int:
        return len(self.event_shape)
    @property
    def value_in_support(self) -> float:
        return 0.0
    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), )
    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()
    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT
    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal
    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial
    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)
    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)
    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
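# Worked check (illustrative, not from the original file): squareplus(x) =
# (x + sqrt(x^2 + 4)) / 2 is a smooth map onto the positive reals, used above to
# keep scale / df / total_count positive. squareplus(0) = 1, squareplus(-2) =
# sqrt(2) - 1 ≈ 0.414, and squareplus(x) ≈ x for large x.
#
#     x = torch.tensor([-2.0, 0.0, 10.0])
#     print(DistributionOutput.squareplus(x))  # tensor([ 0.4142,  1.0000, 10.0990])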
| 34
| 1
|
'''simple docstring'''
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 289
|
'''simple docstring'''
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    # transpose the rows of source data into per-attribute columns
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
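# Worked example (illustrative, not from the original file), using the weights
# convention above (0 = lower is better, 1 = higher is better):
#
#     vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
#     print(procentual_proximity(vehicles, [0, 0, 1]))
#     # -> [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]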
| 289
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, state_dim=17, act_dim=4, hidden_size=128, max_ep_len=4_096, action_tanh=True, vocab_size=1, n_positions=1_024, n_layer=3, n_head=1, n_inner=None, activation_function="relu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50_256, eos_token_id=50_256, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs, ):
        """simple docstring"""
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
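# Illustrative usage sketch (not part of this source file):
#
#     config = DecisionTransformerConfig(state_dim=17, act_dim=4)
#     print(config.hidden_size)  # 128 (the default)
#     print(config.max_position_embeddings)  # maps onto n_positions via attribute_map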
| 174
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "timm_backbone"
    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 174
| 1
|
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'''CLIPConfigMixin''',
'''DecisionTransformerConfigMixin''',
'''EncoderDecoderConfigMixin''',
'''RagConfigMixin''',
'''SpeechEncoderDecoderConfigMixin''',
'''VisionEncoderDecoderConfigMixin''',
'''VisionTextDualEncoderConfigMixin''',
}
def check_config_docstrings_have_checkpoints() -> None:
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
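# Quick demo (illustrative, not from the original file): the checkpoint regex
# extracts (name, link) pairs from a config docstring.
#
#     doc = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased) for details."
#     print(_re_checkpoint.findall(doc))
#     # [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]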
| 410
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        """simple docstring"""
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        """
        Encode a batch of text or text pairs. Unlike the regular ``__call__``, this handles an additional
        num_candidate axis (batch_size, num_candidates, text) and always pads every candidate to *max_length*.
        """
        # Always use a fixed sequence length so packs of candidates can be stacked into a batch.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
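
# Illustrative usage sketch (not part of the original module): how
# `batch_encode_candidates` differs from a plain `__call__`. Each example holds
# several candidate texts, and every candidate is padded to the same length, so
# the returned tensors gain an extra num_candidates dimension. The checkpoint
# name is one of the REALM checkpoints listed in the maps above.
#
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   candidates = [
#       ["Hello world!", "Nice to meet you!"],
#       ["The cute cat.", "The adorable dog."],
#   ]
#   batch = tokenizer.batch_encode_candidates(candidates, max_length=10, return_tensors="pt")
#   # batch["input_ids"].shape -> (2, 2, 10): (num_examples, num_candidates, max_length)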
| 410
| 1
|
"""
A skip list: a probabilistic ordered key-value store with expected
O(log n) search, insert and delete (https://en.wikipedia.org/wiki/Skip_list).
"""
from __future__ import annotations

from random import random
from typing import Generic, TypeVar

KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        # Forward references to the next node on every level this node is part of.
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of levels this node participates in."""
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__(self) -> str:
        """Visual representation of the skip list, one line per node."""
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(node.forward))
        lines.append(" " * label_size + "| " * len(node.forward))

        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)
    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw the number of levels for a new node; each extra level has probability p."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
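    # Distribution sketch (illustrative): with the default p = 0.5 the drawn level
    # is geometric, P(level = k) = p**(k - 1) * (1 - p), truncated at max_level.
    # A node therefore stores ~1 / (1 - p) = 2 forward pointers on average, which
    # is what keeps the expected height of the whole list at O(log n).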
    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        """Return (node with the given key or None, update vector of left neighbours per level)."""
        update_vector = []
        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
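    # Worked example (illustrative): with keys [2, 4, 6] all linked on level 0 and
    # the head also pointing at 4 on level 1, locating key 6 walks
    # head -(level 1)-> 4, then 4 -(level 0)-> 6. The returned update vector holds,
    # per level, the last node visited on that level (here [4, 4]): exactly the
    # nodes whose forward pointers must change if 6 is inserted or deleted.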
    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]
    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node
    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13
def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main():
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")
    skip_list.delete(4)
    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
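
# Complexity sketch (illustrative, assuming p = 0.5 and independent random
# levels): search, insert and delete each traverse O(log n) nodes in
# expectation. A quick, informal way to check this empirically:
#
#   from timeit import timeit
#   sl = SkipList()
#   for i in range(1_000):
#       sl.insert(i, i)
#   timeit(lambda: sl.find(999), number=1_000)  # grows roughly logarithmically in n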
| 378
|
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
lowerCamelCase_ = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
lowerCamelCase_ = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'{len(upper_files)} files contain uppercase characters:')
print("\n".join(upper_files) + "\n")
lowerCamelCase_ = [file for file in filepaths if " " in file]
if space_files:
print(f'{len(space_files)} files contain space characters:')
print("\n".join(space_files) + "\n")
lowerCamelCase_ = [file for file in filepaths if "-" in file]
if hyphen_files:
print(f'{len(hyphen_files)} files contain hyphen characters:')
print("\n".join(hyphen_files) + "\n")
lowerCamelCase_ = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'{len(nodir_files)} files are not in a directory:')
print("\n".join(nodir_files) + "\n")
lowerCamelCase_ = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
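
# Example (hypothetical) output for a tree that violates the conventions:
#
#   2 files contain uppercase characters:
#   data_structures/Heap.py
#   sorts/QuickSort.py
#
# The exit code equals the total number of offending files, so a CI job can
# fail the build simply by checking the process return status.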
| 498
| 0
|
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9,
                 is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32,
                 num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8,
                 dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0,
                 decoder_start_token_id=0, scope=None, decoder_layers=None):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")
    def prepare_inputs_dict(self, config, input_ids, decoder_input_ids, attention_mask=None,
                            decoder_attention_mask=None, head_mask=None, decoder_head_mask=None,
                            cross_attn_head_mask=None):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id)
    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id)
    def create_and_check_model(self, config, input_ids, decoder_input_ids, attention_mask,
                               decoder_attention_mask, lm_labels):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids, decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask)
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask,
                                            decoder_attention_mask, lm_labels):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(self, config, input_dict):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f"{tmpdirname}/t5_test.onnx", export_params=True, opset_version=9, input_names=["input_ids", "decoder_input_ids"])

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(config.num_decoder_layers, config.num_heads, device=torch_device)

            out = model.generate(config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks)
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged")
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 429
|
import unittest
from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)
class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)
class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)
class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)
class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)
class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)
class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)
class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
| 429
| 1
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
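
# Why "adapt react readapt apt" tokenizes the way the test expects (an
# illustrative walk-through of the toy merges defined in setUp): BPE applies
# merges lowest-rank first, so "readapt" = r e a d a p t</w> collapses via
# "a p" -> "ap", "ap t</w>" -> "apt</w>", "r e" -> "re", "a d" -> "ad" and
# finally "ad apt</w>" -> "adapt</w>", leaving ["re@@", "adapt"]. For "react"
# only the "r e" merge applies, so the tail stays character-level:
# ["re@@", "a@@", "c@@", "t"].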
| 64
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ])

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ])
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ])

    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
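
# Illustrative: rename_key moves a tensor to its new name in place, e.g.
#
#   sd = {"cls_token": torch.zeros(1)}
#   rename_key(sd, "cls_token", "beit.embeddings.cls_token")
#   assert "beit.embeddings.cls_token" in sd and "cls_token" not in sd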
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights to our BEiT structure.
    """
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False)
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr", commit_message="Add image processor", use_temp_dir=True)
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr", commit_message="Add model", use_temp_dir=True)
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
lowerCAmelCase_ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
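
# Example invocation (illustrative; the checkpoint URL shown is the script's
# default, the output folder is arbitrary):
#
#   python convert_dit_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base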
| 560
| 0
|
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks).
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
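
# Sanity sketch (illustrative): because surviving samples are rescaled by
# 1/keep_prob, drop_path preserves the expected value of its input, e.g.
#
#   x = torch.ones(10_000, 4)
#   y = drop_path(x, drop_prob=0.2, training=True)
#   y.mean()  # ~1.0: about 80% of rows equal 1/0.8 = 1.25, the rest are 0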
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks)."""

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states):
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self):
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """
    Construct Patch Embeddings.
    """

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group Normalization with 1 group; input is a tensor of shape [B, C, *]."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
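
# Design note (illustrative): returning `pool(x) - x` means the surrounding
# residual connection `x + token_mixer(x)` collapses to plain average pooling,
# so PoolFormer's "attention" step is nothing more than a small average-pool
# token mixer.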
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True)
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True)

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
POOLFORMER_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.encoder = PoolFormerEncoder(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values=None, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
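# Minimal inference sketch (hedged: "sail/poolformer_s12" is one public PoolFormer
# checkpoint name, and `image` is assumed to be a PIL image supplied by the caller):
#
#     from transformers import AutoImageProcessor
#     processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#     model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#     inputs = processor(images=image, return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     predicted = model.config.id2label[logits.argmax(-1).item()]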
| 715
|
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key):
    """Rename a single OpenAI Jukebox state-dict key to its transformers equivalent."""
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
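# Examples derived directly from the rules above: "y_emb.lookup_table" becomes
# "metadata_embedding.lookup_table", and a vqvae codebook tensor whose key ends
# in ".k" is renamed to end in ".codebook".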
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    # download the original checkpoints if they are not already cached locally
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
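# Example invocation (the script filename is illustrative; the argument values
# mirror the argparse defaults declared below):
#   python convert_jukebox.py --model_name jukebox-5b-lyrics \
#       --pytorch_dump_folder_path jukebox-5b-lyrics-converted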
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 16
| 0
|
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model, cache, force, trust_remote_code):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
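# Typical invocation, assuming this command is wired into the standard
# `transformers-cli` entry point (the model name is illustrative):
#   transformers-cli download --cache-dir ./cache bert-base-uncased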
| 90
|
def move_tower(height, from_pole, to_pole, with_pole):
    """Recursively move `height` disks from `from_pole` to `to_pole`, using `with_pole` as scratch."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
if __name__ == "__main__":
main()
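# Worked example: a tower of height n takes 2**n - 1 moves, so
# move_tower(2, "A", "B", "C") prints exactly three lines:
#   moving disk from A to C
#   moving disk from A to B
#   moving disk from C to B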
| 214
| 0
|
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Yield the prime numbers indefinitely via an incremental Sieve of Eratosthenes."""
    factor_map = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: slide its recorded factor to the next free multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # No recorded factor means `prime` is prime; schedule its square.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Return the first odd index n for which the remainder 2 * p_n * n exceeds `limit`."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
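# Background for solution(): by the binomial theorem, for odd n the remainder
# ((p - 1)**n + (p + 1)**n) % p**2 reduces to 2 * n * p, while for even n it is
# just 2 -- which is why the loop above tracks 2 * prime * n and skips every
# other prime.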
| 715
|
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Count tile totals t <= t_limit that can form between 1 and 10 distinct hollow square laminae."""
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # keep the hole the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= 10)
if __name__ == "__main__":
print(F"{solution() = }")
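# Each (outer_width, hole_width) pair uses outer_width**2 - hole_width**2 tiles,
# so count[t] is the number of distinct square laminae buildable from exactly t
# tiles; the answer counts the tile totals achievable in 1 to 10 ways.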
| 115
| 0
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # walk down the attribute path to the tensor we want to overwrite
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights into the transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
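# Example invocation (the script filename and paths are illustrative):
#   python convert_wav2vec2_conformer.py \
#       --checkpoint_path ./wav2vec2_conformer.pt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-hf --not_finetuned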
| 543
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """A batched, differentiable pinhole camera."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # map pixel coordinates to [-1, 1] and scale by the field of view
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
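# Usage sketch, grounded in the code above: create_pan_cameras(64) builds 20
# poses on a circle looking at the origin (shape == (1, 20)), so
# camera.camera_rays has shape (1, 20 * 64 * 64, 2, 3) -- an (origin, direction)
# pair for every pixel of every view.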
| 463
| 0
|
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()
# Now let's copy all the weights.
# Embeddings
A = xmod_sent_encoder.embed_tokens.weight
A = xmod_sent_encoder.embed_positions.weight
A = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
A = xmod_sent_encoder.layernorm_embedding.weight
A = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
A = model.roberta.encoder.layer[i]
A = xmod_sent_encoder.layers[i]
# self attention
A = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
A = xmod_layer.self_attn.q_proj.weight
A = xmod_layer.self_attn.q_proj.bias
A = xmod_layer.self_attn.k_proj.weight
A = xmod_layer.self_attn.k_proj.bias
A = xmod_layer.self_attn.v_proj.weight
A = xmod_layer.self_attn.v_proj.bias
# self-attention output
A = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
A = xmod_layer.self_attn.out_proj.weight
A = xmod_layer.self_attn.out_proj.bias
A = xmod_layer.self_attn_layer_norm.weight
A = xmod_layer.self_attn_layer_norm.bias
# intermediate
A = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
A = xmod_layer.fca.weight
A = xmod_layer.fca.bias
# output
A = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
A = xmod_layer.fca.weight
A = xmod_layer.fca.bias
A = xmod_layer.final_layer_norm.weight
A = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
A = xmod_layer.adapter_layer_norm.weight
A = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
A = bert_output.adapter_modules[lang_code]
A = xmod_layer.adapter_modules[lang_code]
A = from_adapter.fca.weight
A = from_adapter.fca.bias
A = from_adapter.fca.weight
A = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
A = xmod_sent_encoder.layer_norm.weight
A = xmod_sent_encoder.layer_norm.bias
if classification_head:
A = xmod.model.classification_heads["mnli"].dense.weight
A = xmod.model.classification_heads["mnli"].dense.bias
A = xmod.model.classification_heads["mnli"].out_proj.weight
A = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
A = xmod.model.encoder.lm_head.dense.weight
A = xmod.model.encoder.lm_head.dense.bias
A = xmod.model.encoder.lm_head.layer_norm.weight
A = xmod.model.encoder.lm_head.layer_norm.bias
A = xmod.model.encoder.lm_head.weight
A = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
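# Example invocation (the script filename and paths are illustrative):
#   python convert_xmod_checkpoint.py \
#       --xmod_checkpoint_path ./xmod.base/model.pt \
#       --pytorch_dump_folder_path ./xmod-base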
| 711
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """Output of a decoding method."""

    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    """
    Discretization bottleneck of a VQ-VAE. Supports post-hoc remapping of indices
    and avoids costly one-hot matrix multiplications via torch.cdist.
    """

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
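# Note on DiagonalGaussianDistribution: kl() with other=None is the closed-form
# KL divergence to a standard normal, 0.5 * sum(mean**2 + var - 1 - logvar),
# summed over the channel/spatial dims [1, 2, 3], and sample() reparameterizes
# as mean + std * eps so gradients flow through the sampling step.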
| 22
| 0
|
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """
    Calculate the apparent power in a single-phase AC circuit from the voltage
    and current magnitudes and their phase angles (in degrees).
    """
    # Convert the phase angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
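# Worked examples (checked against the math above):
#   apparent_power(100, 5, 0, 0)  -> (500+0j)
#   apparent_power(100, 5, 90, 0) -> approximately 500j, since the voltage
#   phasor is rotated by 90 degrees before the product is taken.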
| 513
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class VanConfig(PretrainedConfig):
    """Configuration class for a VAN (Visual Attention Network) model."""

    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
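# Minimal usage sketch (illustrative; the narrower hidden sizes below are an
# arbitrary example, not an official checkpoint configuration):
if __name__ == "__main__":
    config = VanConfig(hidden_sizes=[32, 64, 160, 256])
    print(config.model_type, config.hidden_sizes)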
| 513
| 1
|
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("""Googling.....""")
UpperCAmelCase = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
UpperCAmelCase = requests.get(url, headers={"""UserAgent""": UserAgent().random})
# res.raise_for_status()
with open("""project1a.html""", """wb""") as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
UpperCAmelCase = BeautifulSoup(res.text, """html.parser""")
UpperCAmelCase = list(soup.select(""".eZt8xd"""))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("""href"""))
else:
webbrowser.open(F'''https://google.com{link.get('href')}''')
| 342
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
UpperCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
UpperCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        # Always pad to max_length so every candidate stacks into one tensor.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
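# Minimal usage sketch (illustrative; the candidate strings are made up):
# encode a batch of candidate passages per question with fixed-length padding,
# yielding tensors shaped (num_questions, num_candidates, max_length).
if __name__ == "__main__":
    tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
    candidates = [["passage one", "passage two"], ["passage three", "passage four"]]
    batch = tokenizer.batch_encode_candidates(candidates, max_length=16, return_tensors="pt")
    print(batch["input_ids"].shape)  # torch.Size([2, 2, 16])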
| 342
| 1
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 164
|
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(self, input_ids, prefix_embeds, attention_mask=None, labels=None):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
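# Minimal standalone sketch (illustrative tensors) of the length-normalized
# beam bookkeeping used in generate_beam above: cumulative log-probs are
# divided by sequence length before ranking candidate continuations.
if __name__ == "__main__":
    scores_sum = torch.tensor([[-3.2, -4.0], [-5.1, -4.4]])  # (beams, vocab)
    seq_lengths = torch.tensor([2.0, 3.0])
    normalized = (scores_sum / seq_lengths[:, None]).view(-1)
    print(normalized.topk(2, -1))  # indices of the best (beam, token) pairs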
| 164
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
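# Minimal usage sketch (illustrative; not executed here): with the lazy module
# registered above, submodule attributes are imported on first access, e.g.
#   from transformers.models.poolformer import PoolFormerConfig
#   config = PoolFormerConfig()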
| 81
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be
        # tested with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
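# Minimal usage sketch (illustrative; model/tokenizer setup omitted) of how
# TextIteratorStreamer is typically consumed in application code:
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#   thread = Thread(target=model.generate, kwargs={"input_ids": ids, "streamer": streamer})
#   thread.start()
#   for chunk in streamer:
#       print(chunk, end="", flush=True)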
| 81
| 1
|
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=__A ):
'''simple docstring'''
_lowercase = ["flax", "transformers"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(cls , ['flax', 'transformers'] )
class lowerCAmelCase_ ( metaclass=__A ):
'''simple docstring'''
_lowercase = ["flax", "transformers"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(cls , ['flax', 'transformers'] )
class lowerCAmelCase_ ( metaclass=__A ):
'''simple docstring'''
_lowercase = ["flax", "transformers"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(cls , ['flax', 'transformers'] )
class lowerCAmelCase_ ( metaclass=__A ):
'''simple docstring'''
_lowercase = ["flax", "transformers"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(cls , ['flax', 'transformers'] )
| 220
|
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ],  # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setup of framework-dependent parameters for the framework-agnostic generation tests
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        sentence = "Hello, my dog is cute and"
        tokens = tokenizer(sentence, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # Let's create a fake model whose `call` accepts a "foo" argument.
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 39
| 0
|
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    "is :"
)
print(z)
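# Sanity check (illustrative addition, not part of the original script): the
# in-place sort above must leave M in non-decreasing order, and the comparison
# count should grow roughly as O(n log n) on average.
assert all(M[i] <= M[i + 1] for i in range(len(M) - 1)), "array is not sorted"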
| 718
|
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
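# Minimal input/output sketch (illustrative; file names are made up): given
#   {"benchmarks/bench_map.json": {"time": {"new": 1.5, "old": 2.0, "diff": -0.5}}}
# the script emits a "### Benchmark: bench_map.json" section whose table row
# reads "| new / old (diff) | 1.500000 / 2.000000 (-0.500000) |".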
| 280
| 0
|
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm1.weight''', f'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.norm1.bias''', f'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.weight''', f'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.bias''', f'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm2.weight''', f'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.norm2.bias''', f'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.weight''', f'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.bias''', f'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc2.weight''', f'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.mlp.fc2.bias''', f'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original checkpoint's weights to our VisionEncoderDecoderModel structure."""
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_snake_case : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 693
|
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = False, False, False
@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM bytes, we don't have to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(
        self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
    ) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self):
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
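# Minimal usage sketch (illustrative; the file name is made up): cast a column
# of paths to the Audio feature so examples decode lazily on access.
#   from datasets import Dataset, Audio
#   ds = Dataset.from_dict({"audio": ["sample1.wav"]}).cast_column("audio", Audio(sampling_rate=16_000))
#   ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}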
| 467
| 0
|
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def _UpperCamelCase ( __A ) -> Union[str, Any]:
'''simple docstring'''
random.seed(__A )
np.random.seed(__A )
torch.manual_seed(__A )
torch.cuda.manual_seed_all(__A )
# ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of model weights.
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config
@classmethod
def __a ( cls , a , a ):
UpperCamelCase__ , UpperCamelCase__ = model_cls.load_config(a , return_unused_kwargs=a )
UpperCamelCase__ = model_cls.from_pretrained(a )
UpperCamelCase__ = cls(model.parameters() , model_cls=a , model_config=model.config )
ema_model.load_state_dict(a )
return ema_model
def __a ( self , a ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
UpperCamelCase__ = self.model_cls.from_config(self.model_config )
UpperCamelCase__ = self.state_dict()
state_dict.pop("shadow_params" , a )
model.register_to_config(**a )
self.copy_to(model.parameters() )
model.save_pretrained(a )
def __a ( self , a ):
UpperCamelCase__ = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
UpperCamelCase__ = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
UpperCamelCase__ = (1 + step) / (10 + step)
UpperCamelCase__ = min(a , self.decay )
# make sure decay is not smaller than min_decay
UpperCamelCase__ = max(a , self.min_decay )
return cur_decay_value
    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
        parameters = list(parameters)
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)
        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        state_dict = copy.deepcopy(state_dict)
        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")
        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")
        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")
        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")
        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")
        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")
        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")
        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
| 223
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size, keep_aspect_ratio: bool, multiple: int
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
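# A standalone re-implementation of the rounding rule above, for illustration
# only: a value is snapped to the nearest multiple, falling back to rounding
# down (or up) when the nearest multiple violates the max (or min) bound.
import math

def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x

print(constraint_to_multiple_of(250, 32))               # 256, the nearest multiple of 32
print(constraint_to_multiple_of(250, 32, max_val=255))  # 224, rounded down to respect the cap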
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
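# A toy sketch of what `post_process_semantic_segmentation` reduces to when no
# target sizes are given: a per-pixel argmax over the class dimension. The
# logits here are random stand-ins, not model outputs.
import torch

logits = torch.randn(1, 3, 4, 4)  # batch of 1 image, 3 classes, 4x4 map
segmentation = logits.argmax(dim=1)
print(segmentation.shape)  # torch.Size([1, 4, 4])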
| 223
| 1
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
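# A vectorized numpy sketch of the bbox legalization loop in
# `prepare_config_and_inputs` above: coordinates are reordered so that
# x0 <= x1 and y0 <= y1. The sample box is deliberately illegal.
import numpy as np

bbox = np.array([[10, 40, 5, 20]])  # [x0, y0, x1, y1] with both pairs swapped
x0, y0, x1, y1 = bbox[:, 0], bbox[:, 1], bbox[:, 2], bbox[:, 3]
fixed = np.stack([np.minimum(x0, x1), np.minimum(y0, y1), np.maximum(x0, x1), np.maximum(y0, y1)], axis=1)
print(fixed)  # [[ 5 20 10 40]]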
| 124
|
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
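# An alternative loop check with O(1) extra memory, sketched here with Floyd's
# tortoise-and-hare algorithm; the `visited` list above costs O(n) memory and
# O(n^2) time by comparison.
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # advances one node per step
        fast = fast.next_node.next_node  # advances two nodes per step
        if slow is fast:                 # the pointers can only meet inside a cycle
            return True
    return False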
| 124
| 1
|
from copy import deepcopy
class FenwickTree:
    """Fenwick tree (binary indexed tree) supporting prefix sums and point updates."""

    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr):
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        return index - (index & (-index))

    def add(self, index, value):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        self.add(index, value - self.get(index))

    def prefix(self, right):
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        return self.query(index, index + 1)

    def rank_query(self, value):
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
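# A quick usage sketch of the FenwickTree above (indices are 0-based):
tree = FenwickTree(arr=[1, 2, 3, 4, 5])
print(tree.prefix(3))      # 6  -> 1 + 2 + 3
print(tree.query(1, 4))    # 9  -> 2 + 3 + 4
print(tree.rank_query(6))  # 2  -> largest index i with arr[0] + ... + arr[i] <= 6
tree.add(0, 10)            # the stored array is now [11, 2, 3, 4, 5]
print(tree.prefix(3))      # 16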
| 86
|
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 86
| 1
|
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
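# The "<|byteN|>" fallback above encodes text with no vocabulary match as raw
# UTF-8 bytes. A standalone illustration of the round trip:
text = "日"
byte_tokens = ["<|byte%d|>" % b for b in text.encode("utf-8")]
print(byte_tokens)  # ['<|byte230|>', '<|byte151|>', '<|byte165|>']
decoded = bytearray(int(t[6:-2]) for t in byte_tokens).decode("utf-8", errors="replace")
print(decoded)  # 日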
| 270
|
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(grid, init, goal, cost, heuristic) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
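# The heuristic above is the Manhattan distance to the goal (plus an obstacle
# penalty). A tiny standalone check against the 5x6 grid used in the demo:
goal = [4, 5]
h = [[abs(i - goal[0]) + abs(j - goal[1]) for j in range(6)] for i in range(5)]
print(h[0][0])  # 9 -> 4 rows and 5 columns away from the goal
print(h[4][5])  # 0 -> the goal itself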
| 346
| 0
|
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()
        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()
        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
| 713
|
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count of a publication, scraped from Google Scholar."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 13
| 0
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features

logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self) -> int:
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
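# A hypothetical wiring of the dataset above; "./squad" is a made-up path that
# would need to contain the SQuAD train/dev json files for this to run.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
dataset = SquadDataset(args, tokenizer, mode=Split.dev)
print(len(dataset))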
| 429
|
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    # Hash map where each slot stores a deque of values (separate chaining).
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(self.values[key]) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
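# Hedged sketch (added; illustrative only): the same separate-chaining idea as
# above, shown with a plain dict of deques so it runs without the HashTable base
# class. Bucket count and keys are arbitrary example values.
def _chained_insert(table, key, value, bucket_count):
    slot = key % bucket_count
    if slot not in table:
        table[slot] = deque()
    table[slot].appendleft(value)  # newest value sits at the front, as in _set_value

if __name__ == "__main__":
    demo_table = {}
    for v in (10, 20, 30):
        _chained_insert(demo_table, v, v, bucket_count=7)
    print(demo_table)  # {3: deque([10]), 6: deque([20]), 2: deque([30])}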
| 429
| 1
|
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx):
        return idx * 2

    def right(self, idx):
        return idx * 2 + 1

    def build(self, idx, left_element, right_element, a):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    # assign val to the range [a, b] in O(log n) using lazy propagation
    def update(self, idx, left_element, right_element, a, b, val):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    # max over the range [a, b] in O(log n) using lazy propagation
    def query(self, idx, left_element, right_element, a, b):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
size = 15
segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
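# Hedged self-check (added; uses only the class defined above): one lazy range
# assignment followed by a range-max query, each O(log n).
if __name__ == "__main__":
    demo = SegmentTree(8)
    demo.build(1, 1, 8, [5, 1, 4, 2, 8, 3, 7, 6])
    assert demo.query(1, 1, 8, 1, 8) == 8
    demo.update(1, 1, 8, 5, 5, 0)  # assign 0 at position 5 (previously the max, 8)
    assert demo.query(1, 1, 8, 1, 8) == 7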
| 716
|
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
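# Hedged usage sketch (added; paths are placeholders): the converter can also be
# called programmatically, bypassing argparse:
#
# convert_openai_checkpoint_to_pytorch(
#     openai_checkpoint_folder_path="/path/to/openai/checkpoint",
#     openai_config_file="",  # empty string -> default OpenAIGPTConfig()
#     pytorch_dump_folder_path="/path/to/output",
# )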
| 441
| 0
|
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))
    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output

def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
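# Hedged sketch (added; illustrative): quick sanity checks for the two helpers
# above. Kept as comments because this module is imported by the test runner.
#
# dummy_ids = ids_tensor((2, 5), vocab_size=11)
# assert dummy_ids.shape == (2, 5)
# mask = random_attention_mask((2, 5))
# assert (mask[:, -1] == 1).all()  # the last token is always attended to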
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
def _get_input_ids_and_config(self):
    config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
    # cut to half length & take max batch_size 3
    max_batch_size = 2
    sequence_length = inputs["input_ids"].shape[-1] // 2
    input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
    attention_mask = jnp.ones_like(input_ids)
    attention_mask = attention_mask[:max_batch_size, :sequence_length]
    # generate max 5 tokens
    max_length = input_ids.shape[-1] + 5
    if config.eos_token_id is not None and config.pad_token_id is None:
        # hack to allow generate for models such as GPT2 as is done in `generate()`
        config.pad_token_id = config.eos_token_id
    return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def test_greedy_generate_pt_flax(self):
    config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
    config.do_sample = False
    config.max_length = max_length
    config.decoder_start_token_id = 0
    for model_class in self.all_generative_model_classes:
        flax_model = model_class(config)
        pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
        pt_model_class = getattr(transformers, pt_model_class_name)
        pt_model = pt_model_class(config).eval()
        pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)
        flax_generation_outputs = flax_model.generate(input_ids).sequences
        pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))
        if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
            flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
        self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
def test_greedy_generate(self):
    config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
    config.do_sample = False
    config.max_length = max_length
    for model_class in self.all_generative_model_classes:
        model = model_class(config)
        generation_outputs = model.generate(input_ids).sequences
        self.assertEqual(generation_outputs.shape[-1], max_length)
        jit_generate = jit(model.generate)
        jit_generation_outputs = jit_generate(input_ids).sequences
        self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : int = self._get_input_ids_and_config()
_lowerCAmelCase : str = True
_lowerCAmelCase : List[str] = max_length
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : List[Any] = model_class(__lowerCamelCase )
_lowerCAmelCase : Tuple = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCamelCase )
_lowerCAmelCase : Optional[Any] = jit(model.generate )
_lowerCAmelCase : Optional[Any] = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Optional[Any] = self._get_input_ids_and_config()
_lowerCAmelCase : Optional[int] = False
_lowerCAmelCase : List[str] = max_length
_lowerCAmelCase : Dict = 2
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
_lowerCAmelCase : str = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCamelCase )
_lowerCAmelCase : Optional[int] = jit(model.generate )
_lowerCAmelCase : List[Any] = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = self._get_input_ids_and_config()
_lowerCAmelCase : List[Any] = False
_lowerCAmelCase : Optional[Any] = max_length
_lowerCAmelCase : Optional[Any] = 2
_lowerCAmelCase : int = 2
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : Any = model_class(__lowerCamelCase )
_lowerCAmelCase : List[Any] = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Optional[Any] = self._get_input_ids_and_config()
_lowerCAmelCase : str = True
_lowerCAmelCase : Optional[int] = max_length
_lowerCAmelCase : List[str] = 0.8
_lowerCAmelCase : Tuple = 10
_lowerCAmelCase : Any = 0.3
_lowerCAmelCase : int = 1
_lowerCAmelCase : Union[str, Any] = 8
_lowerCAmelCase : Optional[Any] = 9
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : List[Any] = model_class(__lowerCamelCase )
_lowerCAmelCase : Tuple = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCamelCase )
_lowerCAmelCase : str = jit(model.generate )
_lowerCAmelCase : Tuple = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = self._get_input_ids_and_config()
_lowerCAmelCase : List[str] = max_length
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : int = 8
_lowerCAmelCase : Optional[Any] = 9
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : Tuple = model_class(__lowerCamelCase )
_lowerCAmelCase : Dict = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCamelCase )
_lowerCAmelCase : Optional[int] = jit(model.generate )
_lowerCAmelCase : List[str] = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : List[Any] = self._get_input_ids_and_config()
_lowerCAmelCase : Optional[Any] = max_length
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : Union[str, Any] = 1
_lowerCAmelCase : List[Any] = 8
_lowerCAmelCase : Tuple = 9
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
_lowerCAmelCase : Dict = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCamelCase )
_lowerCAmelCase : List[str] = jit(model.generate )
_lowerCAmelCase : Optional[Any] = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : List[Any] = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCAmelCase : Optional[int] = attention_mask.at[(0, 0)].set(0 )
_lowerCAmelCase : int = False
_lowerCAmelCase : List[Any] = max_length
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : Union[str, Any] = model_class(__lowerCamelCase )
_lowerCAmelCase : Tuple = model.generate(__lowerCamelCase ,attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCamelCase )
_lowerCAmelCase : List[str] = jit(model.generate )
_lowerCAmelCase : Optional[int] = jit_generate(__lowerCamelCase ,attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCAmelCase : int = attention_mask.at[(0, 0)].set(0 )
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Optional[int] = max_length
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : Any = model_class(__lowerCamelCase )
_lowerCAmelCase : str = model.generate(__lowerCamelCase ,attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCamelCase )
_lowerCAmelCase : int = jit(model.generate )
_lowerCAmelCase : List[Any] = jit_generate(__lowerCamelCase ,attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Optional[Any] = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCAmelCase : List[Any] = attention_mask.at[(0, 0)].set(0 )
_lowerCAmelCase : Union[str, Any] = 2
_lowerCAmelCase : List[Any] = max_length
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : List[Any] = model_class(__lowerCamelCase )
_lowerCAmelCase : Optional[int] = model.generate(__lowerCamelCase ,attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCamelCase )
_lowerCAmelCase : List[str] = jit(model.generate )
_lowerCAmelCase : List[Any] = jit_generate(__lowerCamelCase ,attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
| 259
|
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # predictor step (explicit Euler), then trapezoidal corrector
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
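# Hedged example (added): applying the second-order scheme above to y' = y with
# y(0) = 1; with step size 0.1 the endpoint approximates e to within about 5e-3.
def _exp_ode(x: float, y: float) -> float:
    return y

if __name__ == "__main__":
    approx = euler_modified(_exp_ode, 1.0, 0.0, 0.1, 1.0)[-1]
    print(approx, np.exp(1.0))  # ~2.7141 vs 2.7183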
| 16
| 0
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowerCamelCase = 16
__lowerCamelCase = 32
def b2mb(x: float) -> int:
    # bytes -> megabytes
    return int(x / 2**20)
class TorchTracemalloc:
    # Context manager that reports CUDA memory used and peaked inside the block.
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased", n_train: int = 320, n_val: int = 160, ):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"})

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)))
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        default="bert-base-cased",
        type=str,
        required=False,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--output_dir", default=".", type=str, help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory."
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        default=None,
        type=float,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train", default=320, type=int, help="Number of training examples to use.", )
    parser.add_argument(
        "--n_val", default=160, type=int, help="Number of validation examples to use.", )
    parser.add_argument(
        "--num_epochs", default=1, type=int, help="Number of train epochs.", )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
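# Hedged launch sketch (added; file and config names are placeholders): run the
# script through the accelerate launcher so Accelerator() picks up the DeepSpeed
# plugin configuration:
#
#   accelerate launch --config_file deepspeed_config.yaml peak_memory_tracking.py \
#       --model_name_or_path bert-base-cased --num_epochs 1 --output_dir .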
| 702
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __A (PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
def get_dummy_components(self):
    return self._get_superresolution_dummy_components()

def get_dummy_inputs(self, device, seed=0):
    if str(device).startswith("mps"):
        generator = torch.manual_seed(seed)
    else:
        generator = torch.Generator(device=device).manual_seed(seed)
    image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
    original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
    mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
    inputs = {
        "prompt": "A painting of a squirrel eating a burger",
        "image": image,
        "original_image": original_image,
        "mask_image": mask_image,
        "generator": generator,
        "num_inference_steps": 2,
        "output_type": "numpy",
    }
    return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def test_xformers_attention_forwardGenerator_pass(self):
    self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
def test_save_load_optional_components(self):
    self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def test_attention_slicing_forward_pass(self):
    self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def test_save_load_local(self):
    self._test_save_load_local()
def test_inference_batch_single_identical(self):
    self._test_inference_batch_single_identical(
        expected_max_diff=1e-2, )
| 213
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 94
|
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class UpperCAmelCase_ (TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
def setUp(self):
    super().setUp()
    vocab = [
        "l",
        "o",
        "w",
        "e",
        "r",
        "s",
        "t",
        "i",
        "d",
        "n",
        "\u0120",
        "\u0120l",
        "\u0120n",
        "\u0120lo",
        "\u0120low",
        "er",
        "\u0120lowest",
        "\u0120newer",
        "\u0120wider",
        "<unk>",
    ]
    vocab_tokens = dict(zip(vocab, range(len(vocab))))
    merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
    self.special_tokens_map = {"unk_token": "<unk>"}
    self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
    self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
    with open(self.vocab_file, "w", encoding="utf-8") as fp:
        fp.write(json.dumps(vocab_tokens) + "\n")
    with open(self.merges_file, "w", encoding="utf-8") as fp:
        fp.write("\n".join(merges))
def get_tokenizer(self, **kwargs):
    kwargs.update(self.special_tokens_map)
    return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def get_rust_tokenizer(self, **kwargs):
    kwargs.update(self.special_tokens_map)
    return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
    return "lower newer", "lower newer"
@cached_property
def default_tokenizer(self):
    return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''')
@cached_property
def default_tokenizer_fast(self):
    return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''')
@require_torch
def test_prepare_batch(self):
    src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
    expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
    for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
        batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
# Test that special tokens are reset
@require_torch
def test_prepare_batch_empty_target_text(self):
    src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
    for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids are returned and no labels
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("labels", batch)
        self.assertNotIn("decoder_attention_mask", batch)
@require_torch
def test_tokenizer_as_target_length(self):
    tgt_text = [
        "Summary of the text.",
        "Another summary.",
    ]
    for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
        targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
        self.assertEqual(32, targets["input_ids"].shape[1])
@require_torch
def test_prepare_batch_not_longer_than_maxlen(self):
    for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
        batch = tokenizer(
            ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 1024))
@require_torch
def test_special_tokens(self):
    src_text = ["A long paragraph for summarization."]
    tgt_text = [
        "Summary of the text.",
    ]
    for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
        inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
        input_ids = inputs["input_ids"]
        labels = inputs["labels"]
        self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
        self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
        self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
        self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
def test_pretokenized_inputs(self):
    pass
def test_embeded_special_tokens(self):
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            sentence = "A, <mask> AllenNLP sentence."
            tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
            tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
            # token_type_ids should put 0 everywhere
            self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
            # attention_mask should put 1 everywhere, so sum over length should be 1
            self.assertEqual(
                sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]), sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), )
            tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
            tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
            # Rust correctly handles the space before the mask while python doesnt
            self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
            self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
            self.assertSequenceEqual(
                tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
            self.assertSequenceEqual(
                tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
| 94
| 1
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments (TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."})
    generation_max_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        }, )
    generation_num_beams: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        }, )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None, metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        }, )
    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
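# Hedged usage sketch (added): a typical setup of the arguments class above for
# generation-based evaluation; field values are illustrative.
#
# args = Seq2SeqTrainingArguments(
#     output_dir="out",
#     predict_with_generate=True,
#     generation_max_length=128,
#     generation_num_beams=4,
# )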
| 702
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)

def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))

def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"

def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))

def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]

def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"

def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"

def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()

def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
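# Hedged launch sketch (added; script name is a placeholder): the checks above
# exercise collective operations, so the file must run under a multi-process
# launcher to be meaningful:
#
#   accelerate launch --num_processes 2 test_operations.py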
| 251
| 0
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}")
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output

class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})

class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(self, tokenizer: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, ) -> Mapping[str, Any]:
        import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs

class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default") -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
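# Hedged usage sketch (added; model choices illustrative): the __init__ above
# expects both sub-configs as dicts that still carry their "model_type":
#
# from transformers import AutoConfig
# encoder = AutoConfig.for_model("vit").to_dict()
# decoder = AutoConfig.for_model("gpt2").to_dict()
# config = VisionEncoderDecoderConfig(encoder=encoder, decoder=decoder)
# assert config.is_encoder_decoder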
| 10
|
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A ( unittest.TestCase ):
@slow
def test_small_integration_test(self):
    model = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''')
    tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''')
    input_ids = tokenizer('''Hello there''', return_tensors='''np''').input_ids
    labels = tokenizer('''Hi I am''', return_tensors='''np''').input_ids
    decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
    logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
    loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
    mtf_score = -(labels.shape[-1] * loss.item())
    EXPECTED_SCORE = -84.9127
    self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 22
| 0
|
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a LoRA-like low-rank adapter - used here for testing purposes only."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False), nn.Linear(rank, module.out_features, bias=False), )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
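# Hedged usage sketch (added; shapes and rank are illustrative): freeze the base
# layer and train only the low-rank adapter path.
#
# base = nn.Linear(16, 16)
# base.weight.requires_grad_(False)
# wrapped = LoRALayer(base, rank=4)
# out = wrapped(torch.randn(2, 16))  # base output + adapter output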
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"
    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10
    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # Models and tokenizer (attribute names kept consistent with the reads below)
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto")
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = self.model_abit.config
self.assertTrue(hasattr(__lowercase , """quantization_config""" ) )
__a = config.to_dict()
__a = config.to_diff_dict()
__a = config.to_json_string()
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
__a = self.model_fpaa.get_memory_footprint()
__a = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__a = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(__lowercase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def test_generate_quality(self):
    encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
    output_sequences = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
    self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = BitsAndBytesConfig()
__a = True
__a = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__lowercase , device_map="""auto""" )
__a = self.tokenizer(self.input_text , return_tensors="""pt""" )
__a = model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__lowercase ) , self.EXPECTED_OUTPUTS )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
with self.assertRaises(__lowercase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(__lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = BitsAndBytesConfig()
with self.assertRaises(__lowercase ):
__a = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__lowercase , load_in_abit=__lowercase , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
with self.assertRaises(__lowercase ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(__lowercase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(__lowercase ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(__lowercase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(__lowercase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__a = self.tokenizer(self.input_text , return_tensors="""pt""" )
__a = self.model_fpaa.to(torch.floataa )
__a = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__a = self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
__a = self.model_fpaa.half()
# Check this does not throw an error
__a = self.model_fpaa.float()
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=__lowercase , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
from transformers import TaForConditionalGeneration
        modules = TaForConditionalGeneration._keep_in_fpaa_modules
        TaForConditionalGeneration._keep_in_fpaa_modules = None
        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=True , device_map="""auto""" )
        encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
        _ = model.generate(**encoded_input )
        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=True , device_map="""auto""" )
        encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
        _ = model.generate(**encoded_input )
        TaForConditionalGeneration._keep_in_fpaa_modules = modules
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=True , device_map="""auto""" )
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
        encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
        _ = model.generate(**encoded_input )
        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=True , device_map="""auto""" )
        encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
        _ = model.generate(**encoded_input )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().setUp()
# model_name
        self.model_name = """bigscience/bloom-560m"""
        self.seq_to_seq_name = """t5-small"""
        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name , load_in_abit=True , device_map="""auto""" )
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=True , device_map="""auto""" )
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True , device_map="""auto""" )
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=True , device_map="""auto""" )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
super().setUp()
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        self.pipe = pipeline(
            """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
super().setUp()
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=True , device_map="""balanced""" )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" )
        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__lowercase ) , self.EXPECTED_OUTPUTS )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
        self.model_name = """facebook/opt-350m"""
super().setUp()
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.floataa )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module ) ):
                module.q_proj = LoRALayer(module.q_proj , rank=16 )
                module.k_proj = LoRALayer(module.k_proj , rank=16 )
                module.v_proj = LoRALayer(module.v_proj , rank=16 )
        # Step 3: dummy batch
        batch = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(module , LoRALayer ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(module , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )
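# Hedged sketch of the `LoRALayer` this test assumes (its real definition lives
# outside this excerpt); names and initialisation here are illustrative only:
#
#   class LoRALayer(nn.Module):
#       """Wrap a (frozen, possibly quantized) linear module with a trainable
#       low-rank adapter: y = module(x) + B(A(x))."""
#
#       def __init__(self, module: nn.Module, rank: int):
#           super().__init__()
#           self.module = module
#           self.adapter = nn.Sequential(
#               nn.Linear(module.in_features, rank, bias=False),
#               nn.Linear(rank, module.out_features, bias=False),
#           )
#           nn.init.zeros_(self.adapter[1].weight)  # start as an identity mapping
#
#       def forward(self, x):
#           return self.module(x) + self.adapter(x)
#
# This is why the assertions above inspect `module.adapter[1].weight.grad`.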
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Optional[Any] ='gpt2-xl'
__lowerCamelCase : Dict =3.3191_8548_5415_2187
| 547
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
def __init__( self : Tuple , __lowercase : int , __lowercase : str=13 , __lowercase : Tuple=7 , __lowercase : int=True , __lowercase : Optional[int]=True , __lowercase : List[str]=True , __lowercase : List[str]=True , __lowercase : Any=99 , __lowercase : int=32 , __lowercase : Optional[int]=2 , __lowercase : List[str]=4 , __lowercase : int=37 , __lowercase : Optional[int]="gelu" , __lowercase : Any=0.1 , __lowercase : List[Any]=0.1 , __lowercase : int=512 , __lowercase : str=16 , __lowercase : str=2 , __lowercase : Optional[Any]=0.02 , __lowercase : str=3 , __lowercase : str=4 , __lowercase : str=None , ):
'''simple docstring'''
__a = parent
__a = 13
__a = 7
__a = True
__a = True
__a = True
__a = True
__a = 99
__a = 32
__a = 2
__a = 4
__a = 37
__a = """gelu"""
__a = 0.1
__a = 0.1
__a = 512
__a = 16
__a = 2
__a = 0.02
__a = 3
__a = 4
__a = None
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowercase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : Tuple , __lowercase : Optional[int] , __lowercase : Tuple , __lowercase : Any , __lowercase : Tuple , __lowercase : int , __lowercase : List[Any] , __lowercase : List[Any] ):
'''simple docstring'''
__a = TFRoFormerModel(config=__lowercase )
__a = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__a = [input_ids, input_mask]
__a = model(__lowercase )
__a = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Any , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any , __lowercase : int , __lowercase : Any , __lowercase : str , __lowercase : Optional[int] ):
'''simple docstring'''
__a = True
__a = TFRoFormerForCausalLM(config=__lowercase )
__a = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__a = model(__lowercase )["""logits"""]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : int , __lowercase : List[str] , __lowercase : str ):
'''simple docstring'''
__a = TFRoFormerForMaskedLM(config=__lowercase )
__a = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__a = model(__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : int , __lowercase : Union[str, Any] , __lowercase : Optional[int] , __lowercase : str , __lowercase : Dict , __lowercase : List[str] , __lowercase : Optional[int] ):
'''simple docstring'''
__a = self.num_labels
__a = TFRoFormerForSequenceClassification(config=__lowercase )
__a = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__a = model(__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : Any , __lowercase : Union[str, Any] , __lowercase : Tuple , __lowercase : List[Any] , __lowercase : int , __lowercase : Tuple , __lowercase : int , __lowercase : Any ):
'''simple docstring'''
__a = self.num_choices
__a = TFRoFormerForMultipleChoice(config=__lowercase )
__a = tf.tile(tf.expand_dims(__lowercase , 1 ) , (1, self.num_choices, 1) )
__a = tf.tile(tf.expand_dims(__lowercase , 1 ) , (1, self.num_choices, 1) )
__a = tf.tile(tf.expand_dims(__lowercase , 1 ) , (1, self.num_choices, 1) )
__a = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__a = model(__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : List[Any] , __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[str] , __lowercase : str , __lowercase : int , __lowercase : Dict , __lowercase : List[str] ):
'''simple docstring'''
__a = self.num_labels
__a = TFRoFormerForTokenClassification(config=__lowercase )
__a = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__a = model(__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : List[Any] , __lowercase : Dict , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : Any , __lowercase : str , __lowercase : Dict ):
'''simple docstring'''
__a = TFRoFormerForQuestionAnswering(config=__lowercase )
__a = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__a = model(__lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Optional[int] =(
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
__lowerCamelCase : Optional[int] =(
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
__lowerCamelCase : Optional[int] =False
__lowerCamelCase : Tuple =False
    def UpperCamelCase_ ( self : Any , pipeline_test_casse_name : str , config_class : List[Any] , model_architecture : Optional[Any] , tokenizer_name : Optional[int] , processor_name : Tuple ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=37 )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" )
self.assertIsNotNone(__lowercase )
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
__a = tf.constant([[0, 1, 2, 3, 4, 5]] )
__a = model(__lowercase )[0]
# TODO Replace vocab size
__a = 50000
__a = [1, 6, vocab_size]
self.assertEqual(output.shape , __lowercase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__a = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowercase , atol=1E-4 )
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
__lowerCamelCase : Dict =1e-4
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = tf.constant([[4, 10]] )
__a = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__a = emba(input_ids.shape )
__a = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(__lowercase , __lowercase , atol=self.tolerance )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
__a = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
__a = emba.weight[:3, :5]
tf.debugging.assert_near(__lowercase , __lowercase , atol=self.tolerance )
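    # For reference (editor's note, matching the expected tensors above): with
    # embedding_dim d, position p and column j < d/2, the weights follow
    #   weight[p, j]         = sin(p / 10000 ** (2 * j / d))
    #   weight[p, j + d//2]  = cos(p / 10000 ** (2 * j / d))
    # e.g. weight[1, 0] = sin(1) ≈ 0.8415, as checked in both tests.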
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
__lowerCamelCase : int =1e-4
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
        # query/key tensors of shape (batch=2, heads=12, seq_len=16, head_dim=64)
__a = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__a = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__a = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
__a = embed_positions([2, 16, 768] )[None, None, :, :]
__a , __a = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
__lowercase , __lowercase , __lowercase )
__a = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
__a = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __lowercase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __lowercase , atol=self.tolerance )
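    # The rotation being verified (hedged summary): for each feature pair
    # (x1, x2) at position p with angle theta_p, rotary embeddings compute
    #   x1' = x1 * cos(theta_p) - x2 * sin(theta_p)
    #   x2' = x2 * cos(theta_p) + x1 * sin(theta_p)
    # i.e. queries and keys are rotated in 2-D planes before attention, so
    # their dot products depend only on relative positions.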
| 547
| 1
|
class EditDistance:
    """
    Use:
    solver = EditDistance()
    result = solver.min_dist_top_down(worda, wordb)
    """

    def __init__(self ):
        self.worda = ""
        self.wordb = ""
        self.dp = []

    def __min_dist_top_down_dp(self , m: int , n: int ) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.worda[m] == self.wordb[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
            else:
                insert = self.__min_dist_top_down_dp(m , n - 1 )
                delete = self.__min_dist_top_down_dp(m - 1 , n )
                replace = self.__min_dist_top_down_dp(m - 1 , n - 1 )
                self.dp[m][n] = 1 + min(insert , delete , replace )
            return self.dp[m][n]

    def min_dist_top_down(self , worda: str , wordb: str ) -> int:
        self.worda = worda
        self.wordb = wordb
        self.dp = [[-1 for _ in range(len(wordb ) )] for _ in range(len(worda ) )]
        return self.__min_dist_top_down_dp(len(worda ) - 1 , len(wordb ) - 1 )

    def min_dist_bottom_up(self , worda: str , wordb: str ) -> int:
        self.worda = worda
        self.wordb = wordb
        m = len(worda )
        n = len(wordb )
        self.dp = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
        for i in range(m + 1 ):
            for j in range(n + 1 ):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif worda[i - 1] == wordb[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert , delete , replace )
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()
    print("""****************** Testing Edit Distance DP Algorithm ******************""")
    print()
    Sa = input("""Enter the first string: """).strip()
    Sb = input("""Enter the second string: """).strip()
    print()
    print(F"The minimum edit distance is: {solver.min_dist_top_down(Sa, Sb)}")
    print(F"The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sb)}")
    print()
    print("""*************** End of Testing Edit Distance DP Algorithm ***************""")
| 615
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 514
| 0
|
'''Project Euler Problem 31: count the ways to make £2 using any number of standard UK coins.'''


def one_pence() -> int:
    return 1


def two_pence(x: int ) -> int:
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()


def five_pence(x: int ) -> int:
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(x )


def ten_pence(x: int ) -> int:
    return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(x )


def twenty_pence(x: int ) -> int:
    return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(x )


def fifty_pence(x: int ) -> int:
    return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(x )


def one_pound(x: int ) -> int:
    return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(x )


def two_pound(x: int ) -> int:
    return 0 if x < 0 else two_pound(x - 200 ) + one_pound(x )


def solution(x: int = 200 ) -> int:
    return two_pound(x )
if __name__ == "__main__":
print(solution(int(input().strip())))
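# Equivalent bottom-up formulation (illustrative sketch, not part of the
# original solution): iterate coins in a fixed order so each combination is
# counted exactly once, regardless of coin order.
#
#   def solution_dp(target: int = 200) -> int:
#       ways = [1] + [0] * target
#       for coin in (1, 2, 5, 10, 20, 50, 100, 200):
#           for amount in range(coin, target + 1):
#               ways[amount] += ways[amount - coin]
#       return ways[target]
#
# Both approaches give solution(200) == 73682.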
| 703
|
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
class __magic_name__ ( __a ):
"""simple docstring"""
def __init__( self : List[Any] , _lowercase : int=None , **_lowercase : Optional[Any] ):
"""simple docstring"""
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , _lowercase , )
super().__init__(args=_lowercase , **_lowercase )
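# Migration note (illustrative): since the class only warns and then forwards
# its arguments, existing code such as
#   trainer = SageMakerTrainer(args=training_args, model=model)
# can be replaced one-for-one with
#   from transformers import Trainer
#   trainer = Trainer(args=training_args, model=model)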
| 264
| 0
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowerCamelCase =logging.getLogger(__name__)
class _lowerCamelCase ( UpperCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''sequence-classification'''
    def __init__( self , hparams ) -> Optional[Any]:
        """simple docstring"""
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams , num_labels , self.mode )
def __SCREAMING_SNAKE_CASE ( self , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
return self.model(**__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Dict = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
UpperCamelCase__ : str = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
UpperCamelCase__ : int = self(**__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = outputs[0]
UpperCamelCase__ : Dict = self.trainer.lr_schedulers[0]['''scheduler''']
UpperCamelCase__ : Union[str, Any] = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __SCREAMING_SNAKE_CASE ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : List[str] = self.hparams
UpperCamelCase__ : int = processors[args.task]()
UpperCamelCase__ : Optional[int] = processor.get_labels()
for mode in ["train", "dev"]:
UpperCamelCase__ : str = self._feature_file(__SCREAMING_SNAKE_CASE )
if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __SCREAMING_SNAKE_CASE )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
UpperCamelCase__ : Union[str, Any] = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
UpperCamelCase__ : Any = convert_examples_to_features(
__SCREAMING_SNAKE_CASE , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('''Saving features into cached file %s''' , __SCREAMING_SNAKE_CASE )
torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False ) -> DataLoader:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = '''dev''' if mode == '''test''' else mode
UpperCamelCase__ : Optional[int] = self._feature_file(__SCREAMING_SNAKE_CASE )
logger.info('''Loading features from cached file %s''' , __SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = torch.load(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCamelCase__ : List[str] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
UpperCamelCase__ : Optional[int] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
UpperCamelCase__ : List[Any] = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
UpperCamelCase__ : Any = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , batch_size=__SCREAMING_SNAKE_CASE , shuffle=__SCREAMING_SNAKE_CASE , )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
UpperCamelCase__ : Union[str, Any] = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
UpperCamelCase__ : List[str] = self(**__SCREAMING_SNAKE_CASE )
UpperCamelCase__ ,UpperCamelCase__ : List[str] = outputs[:2]
UpperCamelCase__ : Optional[int] = logits.detach().cpu().numpy()
UpperCamelCase__ : int = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> tuple:
"""simple docstring"""
UpperCamelCase__ : List[str] = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
UpperCamelCase__ : int = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
UpperCamelCase__ : Tuple = np.argmax(__SCREAMING_SNAKE_CASE , axis=1 )
elif self.hparams.glue_output_mode == "regression":
UpperCamelCase__ : Dict = np.squeeze(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
UpperCamelCase__ : Dict = [[] for _ in range(out_label_ids.shape[0] )]
UpperCamelCase__ : Any = [[] for _ in range(out_label_ids.shape[0] )]
UpperCamelCase__ : List[Any] = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}
UpperCamelCase__ : Optional[int] = dict(results.items() )
UpperCamelCase__ : Dict = results
return ret, preds_list, out_label_list
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> dict:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : str = self._eval_end(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> dict:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Tuple = self._eval_end(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
BaseTransformer.add_model_specific_args(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
parser.add_argument(
'''--max_seq_length''' , default=1_2_8 , type=__SCREAMING_SNAKE_CASE , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--task''' , default='''''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''The GLUE task to run''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__SCREAMING_SNAKE_CASE , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
def SCREAMING_SNAKE_CASE_ ( ):
UpperCamelCase__ : int = argparse.ArgumentParser()
add_generic_args(UpperCamelCase__ , os.getcwd() )
UpperCamelCase__ : int = GLUETransformer.add_model_specific_args(UpperCamelCase__ , os.getcwd() )
UpperCamelCase__ : Any = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
UpperCamelCase__ : Union[str, Any] = os.path.join(
'''./results''' , f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
os.makedirs(args.output_dir )
UpperCamelCase__ : Tuple = GLUETransformer(UpperCamelCase__ )
UpperCamelCase__ : Dict = generic_train(UpperCamelCase__ , UpperCamelCase__ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
UpperCamelCase__ : int = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=UpperCamelCase__ ) )
UpperCamelCase__ : Optional[Any] = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(UpperCamelCase__ )
if __name__ == "__main__":
main()
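# Example invocation (illustrative; the script name and the generic arguments
# such as data/model paths are assumptions, supplied by `add_generic_args`
# from lightning_base, which is outside this excerpt):
#   python run_pl_glue.py --task mrpc --max_seq_length 128 --gpus 1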
| 285
|
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = DanceDiffusionPipeline
SCREAMING_SNAKE_CASE_ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
SCREAMING_SNAKE_CASE_ = PipelineTesterMixin.required_optional_params - {
'''callback''',
'''latents''',
'''callback_steps''',
'''output_type''',
'''num_images_per_prompt''',
}
SCREAMING_SNAKE_CASE_ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
        unet = UNetaDModel(
block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__SCREAMING_SNAKE_CASE , use_timestep_embedding=__SCREAMING_SNAKE_CASE , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
        scheduler = IPNDMScheduler()
        components = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> Any:
"""simple docstring"""
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
UpperCamelCase__ : Optional[Any] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase__ : Optional[Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 4,
}
return inputs
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = pipe(**inputs )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
UpperCamelCase__ : List[Any] = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def __SCREAMING_SNAKE_CASE ( self ) -> str:
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Any = torch_device
UpperCamelCase__ : Any = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
UpperCamelCase__ : int = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = torch.manual_seed(0 )
UpperCamelCase__ : Optional[Any] = pipe(generator=__SCREAMING_SNAKE_CASE , num_inference_steps=1_0_0 , audio_length_in_s=4.096 )
UpperCamelCase__ : str = output.audios
UpperCamelCase__ : List[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase__ : Tuple = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Any = torch_device
UpperCamelCase__ : Union[str, Any] = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa )
UpperCamelCase__ : Tuple = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = torch.manual_seed(0 )
UpperCamelCase__ : Union[str, Any] = pipe(generator=__SCREAMING_SNAKE_CASE , num_inference_steps=1_0_0 , audio_length_in_s=4.096 )
UpperCamelCase__ : List[Any] = output.audios
UpperCamelCase__ : List[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase__ : Optional[Any] = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 285
| 1
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
a = logging.get_logger(__name__)
a = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
        ('''mobilevit''', '''MobileViTImageProcessor'''),
        ('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def _snake_case ( _snake_case : str ) -> Tuple:
'''simple docstring'''
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
_A = model_type_to_module_name(_snake_case )
_A = importlib.import_module(F'''.{module_name}''' , 'transformers.models' )
try:
return getattr(_snake_case , _snake_case )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(_snake_case , '__name__' , _snake_case ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_A = importlib.import_module('transformers' )
if hasattr(_snake_case , _snake_case ):
return getattr(_snake_case , _snake_case )
return None
def _snake_case ( _snake_case : Union[str, os.PathLike] , _snake_case : Optional[Union[str, os.PathLike]] = None , _snake_case : bool = False , _snake_case : bool = False , _snake_case : Optional[Dict[str, str]] = None , _snake_case : Optional[Union[bool, str]] = None , _snake_case : Optional[str] = None , _snake_case : bool = False , **_snake_case : Optional[Any] , ) -> List[str]:
'''simple docstring'''
_A = get_file_from_repo(
_snake_case , _snake_case , cache_dir=_snake_case , force_download=_snake_case , resume_download=_snake_case , proxies=_snake_case , use_auth_token=_snake_case , revision=_snake_case , local_files_only=_snake_case , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(_snake_case , encoding='utf-8' ) as reader:
return json.load(_snake_case )
class lowercase_ :
'''simple docstring'''
def __init__( self : List[Any] ):
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(_UpperCAmelCase )
def lowerCAmelCase_ ( cls : Dict , _UpperCAmelCase : Tuple , **_UpperCAmelCase : Union[str, Any] ):
_A = kwargs.pop('config' , _UpperCAmelCase )
_A = kwargs.pop('trust_remote_code' , _UpperCAmelCase )
_A = True
_A , _A = ImageProcessingMixin.get_image_processor_dict(_UpperCAmelCase , **_UpperCAmelCase )
_A = config_dict.get('image_processor_type' , _UpperCAmelCase )
_A = None
if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
_A = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_A = config_dict.pop('feature_extractor_type' , _UpperCAmelCase )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
_A = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
_A = config_dict['auto_map']['AutoFeatureExtractor']
_A = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_A = AutoConfig.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
# It could be in `config.image_processor_type``
_A = getattr(_UpperCAmelCase , 'image_processor_type' , _UpperCAmelCase )
if hasattr(_UpperCAmelCase , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
_A = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
_A = image_processor_class_from_name(_UpperCAmelCase )
_A = image_processor_auto_map is not None
_A = image_processor_class is not None or type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING
_A = resolve_trust_remote_code(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if has_remote_code and trust_remote_code:
_A = get_class_from_dynamic_module(
_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
_A = kwargs.pop('code_revision' , _UpperCAmelCase )
if os.path.isdir(_UpperCAmelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING:
_A = IMAGE_PROCESSOR_MAPPING[type(_UpperCAmelCase )]
return image_processor_class.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
raise ValueError(
F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def lowerCAmelCase_ ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple ):
IMAGE_PROCESSOR_MAPPING.register(_UpperCAmelCase , _UpperCAmelCase )
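# Typical usage (illustrative sketch): the resolution order implemented above is
# image processor config -> legacy feature extractor config -> model config ->
# IMAGE_PROCESSOR_MAPPING fallback.
#
#   from transformers import AutoImageProcessor
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")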
| 505
|
"""simple docstring"""
a = 256
# Modulus to hash a string
a = 1_000_003
def _snake_case ( _snake_case : str , _snake_case : str ) -> bool:
'''simple docstring'''
_A = len(_snake_case )
_A = len(_snake_case )
if p_len > t_len:
return False
_A = 0
_A = 0
_A = 1
# Calculating the hash of pattern and substring of text
for i in range(_snake_case ):
_A = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
_A = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
_A = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
_A = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def _snake_case ( ) -> None:
'''simple docstring'''
_A = 'abc1abc12'
_A = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
_A = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(_snake_case , _snake_case ) and not rabin_karp(_snake_case , _snake_case )
# Test 2)
_A = 'ABABX'
_A = 'ABABZABABYABABX'
assert rabin_karp(_snake_case , _snake_case )
# Test 3)
_A = 'AAAB'
_A = 'ABAAAAAB'
assert rabin_karp(_snake_case , _snake_case )
# Test 4)
_A = 'abcdabcy'
_A = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(_snake_case , _snake_case )
# Test 5)
_A = 'Lü'
_A = 'Lüsai'
assert rabin_karp(_snake_case , _snake_case )
_A = 'Lue'
assert not rabin_karp(_snake_case , _snake_case )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
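# Worked rolling-hash step (editor's illustration): with alphabet_size = 256,
# sliding the window from "ab" to "bc" in "abc":
#   hash("ab") = ord("a") * 256 + ord("b")
#   hash("bc") = ((hash("ab") - ord("a") * 256) * 256 + ord("c")) % modulus
# i.e. drop the leading character's contribution (ord(text[i]) * modulus_power),
# shift by the alphabet size, then append the incoming character.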
| 505
| 1
|
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = SpeechTaTokenizer
a_ = False
a_ = True
def _a ( self : int ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(__magic_name__ )
        mask_token = AddedToken("""<mask>""" ,lstrip=True ,rstrip=False )
        tokenizer.mask_token = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self : Tuple ,tokenizer : Any ):
'''simple docstring'''
        input_text = """this is a test"""
        output_text = """this is a test"""
return input_text, output_text
    def get_clean_sequence( self : Optional[int] ,tokenizer : List[Any] ,with_prefix_space : Union[str, Any]=False ,max_length : List[str]=20 ,min_length : str=5 ):
        '''simple docstring'''
        input_text , output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text ,add_special_tokens=False )
        text = tokenizer.decode(ids ,clean_up_tokenization_spaces=False )
        return text, ids
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = """<pad>"""
A_ : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) ,_a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) ,_a )
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<s>""" )
self.assertEqual(vocab_keys[1] ,"""<pad>""" )
self.assertEqual(vocab_keys[-4] ,"""œ""" )
self.assertEqual(vocab_keys[-2] ,"""<mask>""" )
self.assertEqual(vocab_keys[-1] ,"""<ctc_blank>""" )
self.assertEqual(len(_a ) ,81 )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,79 )
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[str] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
A_ : int = tokenizer.vocab_size
A_ : Tuple = len(_a )
self.assertNotEqual(_a ,0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
A_ : str = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
A_ : Optional[int] = tokenizer.add_tokens(_a )
A_ : Dict = tokenizer.vocab_size
A_ : List[str] = len(_a )
self.assertNotEqual(_a ,0 )
self.assertEqual(_a ,_a )
self.assertEqual(_a ,len(_a ) )
self.assertEqual(_a ,all_size + len(_a ) )
A_ : Union[str, Any] = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" ,add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) ,4 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
A_ : Dict = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
A_ : List[Any] = tokenizer.add_special_tokens(_a )
A_ : int = tokenizer.vocab_size
A_ : Tuple = len(_a )
self.assertNotEqual(_a ,0 )
self.assertEqual(_a ,_a )
self.assertEqual(_a ,len(_a ) )
self.assertEqual(_a ,all_size_a + len(_a ) )
A_ : List[str] = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" ,add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) ,6 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] ,tokens[1] )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokens[-4] )
self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
def _a ( self : str ):
'''simple docstring'''
pass
def _a ( self : Dict ):
'''simple docstring'''
pass
def _a ( self : Any ):
'''simple docstring'''
A_ : List[str] = self.get_tokenizer()
A_ : List[str] = tokenizer.tokenize("""This is a test""" )
# fmt: off
self.assertListEqual(_a ,[SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,)
A_ : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_a ,[SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
A_ : Optional[Any] = tokenizer.convert_tokens_to_ids(_a )
# fmt: off
self.assertListEqual(_a ,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
A_ : List[Any] = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a ,[SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : List[str] = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
        expected_encoding = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
'''simple docstring'''
def can_abbreviate(a: str, b: str) -> bool:
    """Return True if string `a` can be turned into `b` by upper-casing some of its
    lowercase letters and deleting all remaining lowercase letters (the classic
    "abbreviation" dynamic-programming problem)."""
    n = len(a)
    m = len(b)
    # dp[i][j] is True when the first i characters of `a` can produce the first j characters of `b`
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Match (possibly upper-cased) a[i] against b[j]
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Lowercase letters may also be deleted
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
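

# Minimal usage sketch (inputs illustrative, not from the original module):
#   can_abbreviate("daBcd", "ABC")  # True: upper-case 'a' and 'c', delete the two 'd's
#   can_abbreviate("abc", "ABD")    # False: 'D' never appears in the source string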
if __name__ == "__main__":
import doctest
doctest.testmod()
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 86: find the least cuboid size M such that the number of
    cuboids with integer shortest surface path, and no side longer than M,
    first exceeds `limit`."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0

    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
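
    # Sanity check against the problem statement (not part of the original file):
    # it gives 1975 integer-shortest-path cuboids for M = 99 and 2060 for M = 100,
    # so solution(1_975) should return 100.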
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range)

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
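

# Worked example (numbers illustrative, not from the original file): a 480x640 (H x W)
# image with output_size=(384, 384), keep_aspect_ratio=True and multiple=32 picks the
# height fit (scale 384/480 = 0.8, which distorts less than 384/640 = 0.6), giving
# 0.8 * 480 = 384 and 0.8 * 640 = 512, both already multiples of 32 -> (384, 512).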
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: int = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
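
    # Usage sketch (shapes illustrative, not from the original file):
    #   maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(480, 640)])
    # returns one (480, 640) tensor of per-pixel class indices per input image.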
"""simple docstring"""
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
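
    # Mapping sketch (numbers illustrative): when up-scaling 400x300 to 800x600 the
    # ratios are 0.5, so destination pixel (i=20, j=10) samples source pixel
    # (y=10, x=5): plain nearest-neighbour interpolation with no blending.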
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
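
# Note on the pattern above: at runtime the module object is swapped for a
# _LazyModule, so entries in `_import_structure` are only imported on first
# attribute access, while the TYPE_CHECKING branch gives static type checkers
# the full import graph at no runtime cost.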
"""simple docstring"""
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global'].")
    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = t5x_global_layer_norm

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"]["embedding"] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"]["embedding"] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"]["embedding"] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
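
# Example invocation (paths and script name are illustrative, not from the original file):
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_name google/t5-v1_1-small \
#       --flax_dump_folder_path ./t5_flax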
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(examples: List[InputExample], label_list: List[str], max_seq_length: int, tokenizer: PreTrainedTokenizer, cls_token_at_end=False, cls_token="[CLS]", cls_token_segment_id=1, sep_token="[SEP]", sep_token_extra=False, pad_on_left=False, pad_token=0, pad_token_segment_id=0, pad_token_label_id=-100, sequence_a_segment_id=0, mask_padding_with_zero=True) -> List[InputFeatures]:
        """Convert a list of `InputExample`s into model-ready `InputFeatures`."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids))
        return features
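

# Labeling sketch (tokens illustrative, not from the original file): with a WordPiece
# tokenizer, a word that splits into several sub-tokens keeps its real label id only
# on the first piece; the remaining pieces get pad_token_label_id (-100), which the
# cross-entropy loss ignores.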
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)))

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id)
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self) -> int:
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id)
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
_lowerCAmelCase:int = tf.data.Dataset.from_generator(
snake_case__ ,({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) ,(
{'''input_ids''': tf.TensorShape([None]), '''attention_mask''': tf.TensorShape([None])},
tf.TensorShape([None]),
) ,)
else:
_lowerCAmelCase:Any = tf.data.Dataset.from_generator(
snake_case__ ,({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) ,(
{
'''input_ids''': tf.TensorShape([None]),
'''attention_mask''': tf.TensorShape([None]),
'''token_type_ids''': tf.TensorShape([None]),
},
tf.TensorShape([None]),
) ,)
def __UpperCamelCase ( self : Dict) -> str:
"""simple docstring"""
_lowerCAmelCase:Tuple = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
return self.dataset
def __len__( self : Any) -> int:
"""simple docstring"""
return len(self.features)
def __getitem__( self : List[str] ,a__ : Optional[Any]) -> InputFeatures:
"""simple docstring"""
return self.features[i]
def simplify(current_set: list) -> list[list]:
    """One Gaussian-elimination pass: normalise rows, cancel the leading term,
    and recurse on the reduced system."""
    # Divide each row by magnitude of first term --> creating 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        saved_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, saved_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list) -> list:
    """Solve n simultaneous linear equations given as n lists of length n + 1."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
A_ = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
A_ = ["a", "b", "c", "d", "e"]
def topological_sort(start, visited, sort):
    """Perform a depth-first-search based topological sort starting from `start`."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort("a", [], [])
print(sort)
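    # For the graph above this prints ['c', 'd', 'e', 'b', 'a']: every vertex appears
    # after all of its children, i.e. a reversed topological ordering of the DAG.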
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
A_ = "src/transformers"
A_ = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->", end_prompt="<!--End of the generated tip-->")
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
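# A hedged sketch (not part of the script) of what `_find_text_in_file` does; the file content
# and prompts below are made up for illustration:
#
#   import tempfile
#   with tempfile.NamedTemporaryFile("w", suffix=".md", delete=False) as f:
#       f.write("<!--start-->\ncontent line\n<!--end-->\n")
#   text, start, end, lines = _find_text_in_file(f.name, "<!--start-->", "<!--end-->")
#   assert text == "content line\n"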
| 384
| 0
|
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary-tree node holding an integer value."""

    def __init__(self, value: int):
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Sum all node values of a binary tree with a recursive depth-first search."""

    def __init__(self, tree: Node):
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
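# A minimal usage sketch (not from the original file): the iterator yields the sum of all node values.
if __name__ == "__main__":
    root = Node(10)
    root.left, root.right = Node(5), Node(-3)
    assert next(iter(BinaryTreeNodeSum(root))) == 12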
| 170
|
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # Nearest-neighbor upsampling to twice the spatial size, then a 3x3 conv.
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # Project the (swished) timestep embedding and broadcast it over the spatial dims.
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
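# A hedged sketch (not in the original module): FlaxUpsample2D doubles the spatial dims of an
# NHWC input; the shapes below are illustrative.
if __name__ == "__main__":
    rng = jax.random.PRNGKey(0)
    block = FlaxUpsample2D(out_channels=8)
    x = jnp.ones((1, 16, 16, 8))
    params = block.init(rng, x)
    assert block.apply(params, x).shape == (1, 32, 32, 8)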
| 13
| 0
|
"""simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss."
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


def html_progress_bar(value, total, prefix, label, width=300):
    "Build an HTML5 progress bar."
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def text_to_html_table(items):
    "Put the texts in `items` in an HTML table; the first row is treated as the header."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
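# For reference: format_time(3661) renders "1:01:01" and format_time(61) renders "01:01";
# html_progress_bar wraps an HTML5 <progress> element, and text_to_html_table treats
# items[0] as the header row.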
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    "A tracker of the training progress: a progress bar plus an HTML table of metrics."

    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    "A `TrainerCallback` that displays the progress of training or evaluation in a Jupyter notebook."

    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
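# A hedged usage sketch (not part of the original module, meant for a Jupyter cell): driving
# the bar manually, outside of a `Trainer`.
if __name__ == "__main__":
    pbar = NotebookProgressBar(100, prefix="Demo")
    for step in range(100):
        time.sleep(0.01)
        pbar.update(step + 1, comment=f"step {step + 1}")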
| 645
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
def __init__( self , a , a=1_3 , a=1_0 , a=3 , a=2 , a=2 , a=2 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=0.9 , a=None , ) -> Optional[Any]:
lowercase__ : str = parent
lowercase__ : int = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Dict = patch_size
lowercase__ : Tuple = tubelet_size
lowercase__ : Optional[int] = num_frames
lowercase__ : Optional[int] = is_training
lowercase__ : int = use_labels
lowercase__ : Optional[int] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : Any = intermediate_size
lowercase__ : str = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Union[str, Any] = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = mask_ratio
lowercase__ : Optional[Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowercase__ : Optional[Any] = (image_size // patch_size) ** 2
lowercase__ : str = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowercase__ : str = int(mask_ratio * self.seq_length )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase__ : int = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Dict = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Tuple:
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : Dict = VideoMAEModel(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = VideoMAEForPreTraining(a )
model.to(a )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Any = torch.ones((self.num_masks,) )
lowercase__ : str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowercase__ : Optional[int] = mask.expand(self.batch_size , -1 ).bool()
lowercase__ : str = model(a , a )
# model only returns predictions for masked patches
lowercase__ : str = mask.sum().item()
lowercase__ : int = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
def _UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Dict:
pass
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[Any] = VideoMAEModel.from_pretrained(a )
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
if not self.has_attentions:
pass
else:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = True
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Any = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowercase__ : Optional[Any] = True
lowercase__ : int = False
lowercase__ : Any = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[int] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Dict = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : List[str] = len(a )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : List[str] = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(a , a ) )
self.assertEqual(out_len + 1 , len(a ) )
lowercase__ : int = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> Optional[int]:
def check_hidden_states_output(a , a , a ):
lowercase__ : Optional[int] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(a ) , a )
lowercase__ : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Tuple = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Union[str, Any] = True
check_hidden_states_output(a , a , a )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Optional[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
a )
lowercase__ : str = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**a )
# verify the logits
lowercase__ : str = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : List[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(a )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : str = image_processor(a , return_tensors='pt' ).to(a )
# add boolean mask, indicating which patches to mask
lowercase__ : Union[str, Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
lowercase__ : str = torch.load(a )
# forward pass
with torch.no_grad():
lowercase__ : List[Any] = model(**a )
# verify the logits
lowercase__ : Dict = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowercase__ : List[str] = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=a )
self.assertEqual(outputs.logits.shape , a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowercase__ : List[Any] = torch.tensor([0.5_142] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowercase__ : Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=a ).to(
a )
with torch.no_grad():
lowercase__ : Any = model(**a )
lowercase__ : List[Any] = torch.tensor(torch.tensor([0.6_469] ) , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
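# Hedged sketch of the masking pattern used in the pretraining tests above: one shared boolean
# mask (1 = masked) is built once and broadcast over the batch, so every video in the batch has
# the same number of masked patches. The numbers are illustrative, not from the checkpoint:
#
#   seq_length, num_masks = 1408, 1254
#   mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
#   bool_masked_pos = mask.expand(2, -1).bool()  # shape (batch=2, seq_length)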
| 645
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=2 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=512 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , _snake_case=0 , ):
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : str = seq_length
_lowerCAmelCase : Optional[int] = is_training
_lowerCAmelCase : str = use_input_mask
_lowerCAmelCase : int = use_token_type_ids
_lowerCAmelCase : List[str] = use_labels
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Optional[int] = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : str = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : int = attention_probs_dropout_prob
_lowerCAmelCase : Tuple = max_position_embeddings
_lowerCAmelCase : int = type_vocab_size
_lowerCAmelCase : Optional[Any] = type_sequence_label_size
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : Any = num_labels
_lowerCAmelCase : Any = num_choices
_lowerCAmelCase : List[str] = scope
_lowerCAmelCase : int = projection_dim
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Dict = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_lowerCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : int = None
if self.use_token_type_ids:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase : str = None
_lowerCAmelCase : Any = None
_lowerCAmelCase : List[Any] = None
if self.use_labels:
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : Union[str, Any] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
_lowerCAmelCase : Any = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
_lowerCAmelCase : Union[str, Any] = TFDPRContextEncoder(config=lowercase_ )
_lowerCAmelCase : Optional[Any] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ )
_lowerCAmelCase : Any = model(lowercase_ , token_type_ids=lowercase_ )
_lowerCAmelCase : Dict = model(lowercase_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
_lowerCAmelCase : int = TFDPRQuestionEncoder(config=lowercase_ )
_lowerCAmelCase : Optional[Any] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ )
_lowerCAmelCase : Tuple = model(lowercase_ , token_type_ids=lowercase_ )
_lowerCAmelCase : Tuple = model(lowercase_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
_lowerCAmelCase : Any = TFDPRReader(config=lowercase_ )
_lowerCAmelCase : Optional[Any] = model(lowercase_ , attention_mask=lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
def SCREAMING_SNAKE_CASE__ ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*lowercase_ )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*lowercase_ )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFDPRContextEncoder.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : List[Any] = TFDPRContextEncoder.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Tuple = TFDPRQuestionEncoder.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Dict = TFDPRReader.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Optional[int] = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base" )
_lowerCAmelCase : Optional[int] = tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 1_0140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
_lowerCAmelCase : Dict = model(lowercase_ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
_lowerCAmelCase : Optional[int] = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 424
|
'''simple docstring'''
def and_gate(input_1: int, input_2: int) -> int:
    """Return 1 only if both inputs are 1 (logical AND): the tuple must contain no zeros."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Exhaustively check the two-input truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
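# A hedged generalization (not in the original file) of the tuple-count trick used above:
# an n-input AND gate is "true iff no input is zero".
#
#   def multi_input_and_gate(*inputs: int) -> int:
#       return int(inputs.count(0) == 0)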
| 640
| 0
|
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
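# Hedged illustration of the token layout the assertions above check (actual ids vary by vocab):
#   single sequence: [CLS] text [SEP]
#   sequence pair:   [CLS] text [SEP] text_2 [SEP]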
| 310
|
"""simple docstring"""
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        "The heuristic here is the Manhattan distance to the goal."
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    """Greedy best-first search on `grid`, always expanding the open node with the lowest f_cost."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the in-bounds, non-obstacle neighbours of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from `node` back to the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
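# Design note (hedged, not in the original): since Node defines __lt__ on f_cost, the
# sort-then-pop(0) pattern in search() could instead use a binary heap for O(log n) pops:
#
#   import heapq
#   heapq.heappush(self.open_nodes, child_node)
#   current_node = heapq.heappop(self.open_nodes)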
| 310
| 1
|
"""simple docstring"""
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
@property
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = 4
_UpperCamelCase = 3
_UpperCamelCase = (32, 32)
_UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes).to(__a)
return {"sample": image}
@property
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (3, 32, 32)
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return (3, 32, 32)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
_UpperCamelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
pass
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''')
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
# enable deterministic behavior for gradient checkpointing
_UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common()
_UpperCamelCase = self.model_class(**__a)
model.to(__a)
assert not model.is_gradient_checkpointing and model.training
_UpperCamelCase = model(**__a).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
_UpperCamelCase = torch.randn_like(__a)
_UpperCamelCase = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
_UpperCamelCase = self.model_class(**__a)
# clone model
model_a.load_state_dict(model.state_dict())
model_a.to(__a)
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
_UpperCamelCase = model_a(**__a).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
_UpperCamelCase = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5)
_UpperCamelCase = dict(model.named_parameters())
_UpperCamelCase = dict(model_a.named_parameters())
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5))
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=__a)
self.assertIsNotNone(__a)
self.assertEqual(len(loading_info['''missing_keys''']) , 0)
model.to(__a)
_UpperCamelCase = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''')
_UpperCamelCase = model.to(__a)
model.eval()
if torch_device == "mps":
_UpperCamelCase = torch.manual_seed(0)
else:
_UpperCamelCase = torch.Generator(device=__a).manual_seed(0)
_UpperCamelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0) , )
_UpperCamelCase = image.to(__a)
with torch.no_grad():
_UpperCamelCase = model(__a , sample_posterior=__a , generator=__a).sample
_UpperCamelCase = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
_UpperCamelCase = torch.tensor(
[
-4.0_078e-01,
-3.8_323e-04,
-1.2_681e-01,
-1.1_462e-01,
2.0_095e-01,
1.0_893e-01,
-8.8_247e-02,
-3.0_361e-01,
-9.8_644e-03,
])
elif torch_device == "cpu":
_UpperCamelCase = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026])
else:
_UpperCamelCase = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485])
self.assertTrue(torch_all_close(__a , __a , rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self , __a=0 , __a=(4, 3, 5_12, 5_12) , __a=False) -> str:
'''simple docstring'''
_UpperCamelCase = torch.floataa if fpaa else torch.floataa
_UpperCamelCase = torch.from_numpy(load_hf_numpy(self.get_file_format(__a , __a))).to(__a).to(__a)
return image
def UpperCAmelCase ( self , __a="CompVis/stable-diffusion-v1-4" , __a=False) -> Any:
'''simple docstring'''
_UpperCamelCase = '''fp16''' if fpaa else None
_UpperCamelCase = torch.floataa if fpaa else torch.floataa
_UpperCamelCase = AutoencoderKL.from_pretrained(
__a , subfolder='''vae''' , torch_dtype=__a , revision=__a , )
model.to(__a).eval()
return model
def UpperCAmelCase ( self , __a=0) -> Optional[Any]:
'''simple docstring'''
if torch_device == "mps":
return torch.manual_seed(__a)
return torch.Generator(device=__a).manual_seed(__a)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
])
def UpperCAmelCase ( self , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.get_sd_vae_model()
_UpperCamelCase = self.get_sd_image(__a)
_UpperCamelCase = self.get_generator(__a)
with torch.no_grad():
_UpperCamelCase = model(__a , generator=__a , sample_posterior=__a).sample
assert sample.shape == image.shape
_UpperCamelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_UpperCamelCase = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice)
assert torch_all_close(__a , __a , atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
])
@require_torch_gpu
def UpperCAmelCase ( self , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.get_sd_vae_model(fpaa=__a)
_UpperCamelCase = self.get_sd_image(__a , fpaa=__a)
_UpperCamelCase = self.get_generator(__a)
with torch.no_grad():
_UpperCamelCase = model(__a , generator=__a , sample_posterior=__a).sample
assert sample.shape == image.shape
_UpperCamelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_UpperCamelCase = torch.tensor(__a)
assert torch_all_close(__a , __a , atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
])
def UpperCAmelCase ( self , __a , __a , __a) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.get_sd_vae_model()
_UpperCamelCase = self.get_sd_image(__a)
with torch.no_grad():
_UpperCamelCase = model(__a).sample
assert sample.shape == image.shape
_UpperCamelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_UpperCamelCase = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice)
assert torch_all_close(__a , __a , atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
])
@require_torch_gpu
def UpperCAmelCase ( self , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.get_sd_vae_model()
_UpperCamelCase = self.get_sd_image(__a , shape=(3, 4, 64, 64))
with torch.no_grad():
_UpperCamelCase = model.decode(__a).sample
assert list(sample.shape) == [3, 3, 5_12, 5_12]
_UpperCamelCase = sample[-1, -2:, :2, -2:].flatten().cpu()
_UpperCamelCase = torch.tensor(__a)
assert torch_all_close(__a , __a , atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
])
@require_torch_gpu
def UpperCAmelCase ( self , __a , __a) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.get_sd_vae_model(fpaa=__a)
_UpperCamelCase = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a)
with torch.no_grad():
_UpperCamelCase = model.decode(__a).sample
assert list(sample.shape) == [3, 3, 5_12, 5_12]
_UpperCamelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_UpperCamelCase = torch.tensor(__a)
assert torch_all_close(__a , __a , atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)])
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''')
def UpperCAmelCase ( self , __a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.get_sd_vae_model(fpaa=__a)
_UpperCamelCase = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a)
with torch.no_grad():
_UpperCamelCase = model.decode(__a).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_UpperCamelCase = model.decode(__a).sample
assert list(sample.shape) == [3, 3, 5_12, 5_12]
assert torch_all_close(__a , __a , atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)])
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''')
def UpperCAmelCase ( self , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.get_sd_vae_model()
_UpperCamelCase = self.get_sd_image(__a , shape=(3, 4, 64, 64))
with torch.no_grad():
_UpperCamelCase = model.decode(__a).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_UpperCamelCase = model.decode(__a).sample
assert list(sample.shape) == [3, 3, 5_12, 5_12]
assert torch_all_close(__a , __a , atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
])
def UpperCAmelCase ( self , __a , __a) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.get_sd_vae_model()
_UpperCamelCase = self.get_sd_image(__a)
_UpperCamelCase = self.get_generator(__a)
with torch.no_grad():
_UpperCamelCase = model.encode(__a).latent_dist
_UpperCamelCase = dist.sample(generator=__a)
assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
_UpperCamelCase = sample[0, -1, -3:, -3:].flatten().cpu()
_UpperCamelCase = torch.tensor(__a)
_UpperCamelCase = 3e-3 if torch_device != '''mps''' else 1e-2
assert torch_all_close(__a , __a , atol=__a)
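# Hedged sketch of the encode/decode round trip these tests exercise (model id as used above;
# the 0.18215 latent scaling factor is the Stable Diffusion convention, assumed here):
#
#   vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
#   latents = vae.encode(image).latent_dist.sample() * 0.18215
#   reconstruction = vae.decode(latents / 0.18215).sample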
| 19
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def _UpperCAmelCase ( __A : "RagExampleArguments" , __A : "ProcessingArguments" , __A : "IndexHnswArguments" , ):
######################################
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
a_ : List[str] = load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
a_ : Dict = dataset.map(__A , batched=__A , num_proc=processing_args.num_proc )
# And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )
# And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
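# A hedged follow-up sketch (paths illustrative, question encoder omitted):
# reload the passages and the HNSW index built above and retrieve neighbours.
from datasets import load_from_disk

dataset = load_from_disk("test_run/dummy-kb/my_knowledge_dataset")
dataset.load_faiss_index("embeddings", "test_run/dummy-kb/my_knowledge_dataset_hnsw_index.faiss")
# `question_embedding` would come from a DPRQuestionEncoder (not shown here):
# scores, retrieved_examples = dataset.get_nearest_examples("embeddings", question_embedding, k=5)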
| 466
| 0
|
'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
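# Worked example: the points below lie on the line y = x + 5, so Neville's
# scheme reproduces the line exactly and interpolating at x0 = 5 returns 10.
interpolated_value, full_table = neville_interpolate([1, 2, 3, 4], [6, 7, 8, 9], 5)
assert interpolated_value == 10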
| 257
|
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
"input_ids": [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
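# A minimal standalone sketch of the round-trip property the slow tests above
# rely on (assuming the `facebook/xglm-564M` checkpoint is reachable); for
# plain ASCII input, decoding the encoded ids should recover the string.
from transformers import XGLMTokenizer

tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
ids = tok.encode("Hello World!")
assert tok.decode(ids, skip_special_tokens=True).strip() == "Hello World!"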
| 257
| 1
|
'''simple docstring'''
def join(separator: str, separated: list[str]) -> str:
    """Join a list of strings with the given separator."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
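# Quick checks of the behaviour restored above (mirroring the original doctests):
assert join("", ["a", "b", "c", "d"]) == "abcd"
assert join("#", ["a", "b", "c", "d"]) == "a#b#c#d"
assert join(" ", ["You", "are", "amazing!"]) == "You are amazing!"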
| 56
|
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # Iterable dataset of unknown length: yields 0, 1, 2, ... and stops at random.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    def test_batch_sampler_shards_with_splits(self):
# Check the shards when the dataset is a round multiple of batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
    def test_batch_sampler_shards_with_no_splits_no_even(self):
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
    def test_batch_sampler_shards_with_splits_no_even(self):
# Check the shards when the dataset is a round multiple of batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]
        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)
        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)
        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
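# A small self-contained illustration of the skip utilities exercised above,
# assuming the public `accelerate.data_loader` API:
from torch.utils.data import DataLoader
from accelerate.data_loader import SkipDataLoader, skip_first_batches

skip_loader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
print([t.tolist() for t in skip_loader])  # [[8, 9, 10, 11], [12, 13, 14, 15]]

plain_loader = DataLoader(list(range(16)), batch_size=4)
resumed = skip_first_batches(plain_loader, num_batches=2)
print([t.tolist() for t in resumed])  # same two batches, starting at 8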
| 56
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=0, scale_embedding=False, pad_token_id=0, eos_token_id=1, forced_eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
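# A hedged sketch of how the attribute_map and properties above resolve aliases
# (assuming standard `transformers` PretrainedConfig behaviour):
config = PegasusConfig(d_model=512, encoder_attention_heads=8)
assert config.hidden_size == 512  # alias for d_model
assert config.num_attention_heads == 8  # alias for encoder_attention_heads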
| 307
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
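# What the _LazyModule indirection buys (a sketch, assuming `transformers` is
# installed): importing the package is cheap, and framework-specific submodules
# are only imported when an attribute is first accessed.
from transformers.models.electra import ElectraConfig  # config only, no torch import yet

config = ElectraConfig()
# from transformers.models.electra import ElectraModel  # would trigger the torch-backed import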
| 307
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 408
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 408
| 1
|
'''simple docstring'''
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank
    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)
    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__
    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__
    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError
    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    """Compute the ``order``-th derivative of ``func`` at ``position`` using dual numbers."""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
import doctest
doctest.testmod()
def f(y):
    return y**2 * y**4
print(differentiate(f, 9, 2))
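# Worked check of the dual-number machinery above: for f(y) = y**6,
# f''(y) = 30 * y**4, so f''(9) = 30 * 9**4 = 196830.
assert differentiate(lambda y: y**6, 9, 2) == 30 * 9**4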
| 435
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta_prelayernorm': [
'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
'RobertaPreLayerNormConfig',
'RobertaPreLayerNormOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaPreLayerNormForCausalLM',
'RobertaPreLayerNormForMaskedLM',
'RobertaPreLayerNormForMultipleChoice',
'RobertaPreLayerNormForQuestionAnswering',
'RobertaPreLayerNormForSequenceClassification',
'RobertaPreLayerNormForTokenClassification',
'RobertaPreLayerNormModel',
'RobertaPreLayerNormPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaPreLayerNormForCausalLM',
'TFRobertaPreLayerNormForMaskedLM',
'TFRobertaPreLayerNormForMultipleChoice',
'TFRobertaPreLayerNormForQuestionAnswering',
'TFRobertaPreLayerNormForSequenceClassification',
'TFRobertaPreLayerNormForTokenClassification',
'TFRobertaPreLayerNormMainLayer',
'TFRobertaPreLayerNormModel',
'TFRobertaPreLayerNormPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
'FlaxRobertaPreLayerNormForCausalLM',
'FlaxRobertaPreLayerNormForMaskedLM',
'FlaxRobertaPreLayerNormForMultipleChoice',
'FlaxRobertaPreLayerNormForQuestionAnswering',
'FlaxRobertaPreLayerNormForSequenceClassification',
'FlaxRobertaPreLayerNormForTokenClassification',
'FlaxRobertaPreLayerNormModel',
'FlaxRobertaPreLayerNormPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 435
| 1
|
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu", num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=200, use_cache=True, decoder_start_token_id=2, early_stopping=True)
    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
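# Example invocation (script name and paths are illustrative):
#   python convert_s2t_fairseq_to_tfms.py \
#       --fairseq_path /path/to/s2t_checkpoint.pt \
#       --pytorch_dump_folder_path ./s2t-converted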
| 441
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    """Automatic mask generation pipeline (SAM-style), implemented as a ChunkPipeline."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(self, image, points_per_batch=64, crops_n_layers: int = 0, crop_overlap_ratio: float = 512 / 1500, points_per_crop: Optional[int] = 32, crop_n_points_downscale_factor: Optional[int] = 1):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor)
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None")
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False)
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0], iou_scores[0], original_sizes[0], input_boxes[0], pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset)
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
def postprocess(
    self,
    model_outputs,
    output_rle_mask=False,
    output_bboxes_mask=False,
    crops_nms_thresh=0.7,
):
    all_scores = []
    all_masks = []
    all_boxes = []
    for model_output in model_outputs:
        all_scores.append(model_output.pop("iou_scores"))
        all_masks.extend(model_output.pop("masks"))
        all_boxes.append(model_output.pop("boxes"))
    all_scores = torch.cat(all_scores)
    all_boxes = torch.cat(all_boxes)
    output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
        all_masks, all_scores, all_boxes, crops_nms_thresh
    )

    # forward along any remaining per-batch outputs untouched
    extra = defaultdict(list)
    for output in model_outputs:
        for k, v in output.items():
            extra[k].append(v)

    optional = {}
    if output_rle_mask:
        optional["rle_mask"] = rle_mask
    if output_bboxes_mask:
        optional["bounding_boxes"] = bounding_boxes
    return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 441
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encoder_decoder'] = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_encoder_decoder'] = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_encoder_decoder'] = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
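# Usage sketch (added): the lazy import structure above defers loading the heavy
# modeling files until first attribute access. A minimal, illustrative way to
# build a seq2seq model from the class this module exposes (checkpoint names are
# assumptions, not from this file):
#
#   from transformers import EncoderDecoderModel
#   model = EncoderDecoderModel.from_encoder_decoder_pretrained(
#       "bert-base-uncased", "bert-base-uncased"
#   )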
| 704
|
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self) -> None:
        """Test for method component()."""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        """Test for method __str__()."""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        """Test for method __len__()."""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        """Test for method euclidean_length()."""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        """Test for vector addition."""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        """Test for vector subtraction."""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        """Test for scalar multiplication and dot product."""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        """Test for operation axpy: a * x + y."""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test__mul__matrix(self) -> None:
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test__add__matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test__sub__matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )
if __name__ == "__main__":
unittest.main()
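# Sanity sketch (added): the expected values in test_euclidean_length are just the
# 2-norm of each vector; a quick standard-library check of the constants used above:
#
#   import math
#   assert round(math.sqrt(1**2 + 2**2), 3) == 2.236
#   assert round(math.sqrt(sum(c**2 for c in [1, 2, 3, 4, 5])), 3) == 7.416
#   assert round(math.sqrt(sum(c**2 for c in [1, -1, 1, -1, 2, -3, 4, -5])), 3) == 7.616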
| 363
| 0
|
"""Fast tokenization classes for DistilBERT."""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its persisted options disagree with
        # the arguments passed in here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
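# Usage sketch (added): minimal round-trip with the fast tokenizer defined above,
# using the canonical checkpoint from the maps at the top of this file:
#
#   tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   enc = tokenizer("Hello world")
#   print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
#   # ['[CLS]', 'hello', 'world', '[SEP]']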
| 588
|
"""Tests for the AltDiffusion image-to-image pipeline."""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
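# Usage sketch (added): the integration tests above boil down to this img2img call;
# running it needs a CUDA GPU and downloads the BAAI/AltDiffusion weights:
#
#   pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion").to("cuda")
#   out = pipe(prompt="A fantasy landscape, trending on artstation",
#              image=init_image, strength=0.75, guidance_scale=7.5)
#   out.images[0].save("fantasy_landscape.png")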
| 588
| 1
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
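# Usage note (added): `pytest_addoption_shared` is assumed to register the
# `--make-reports` option used above; a typical invocation then looks like:
#
#   python -m pytest --make-reports=tests_torch tests/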
| 190
|
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            # fused qkv projections need to be split into q/k/v for the HF model
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
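# Example invocation (added); the script filename is an assumption, the model
# names come from the `model_to_url` table above:
#
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32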
| 190
| 1
|
"""Matthews correlation coefficient metric."""
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
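# Usage sketch (added): loading this metric through the datasets API, mirroring
# the docstring examples above:
#
#   import datasets
#   metric = datasets.load_metric("matthews_correlation")
#   print(metric.compute(references=[1, 3, 2, 0, 3, 2], predictions=[1, 2, 2, 0, 3, 3]))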
| 497
|
"""Fine-tuning a 🤗 Transformers model for image classification."""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        """Computes accuracy on a batch of predictions"""
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
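# Example invocation (added): the dataset name and hyperparameters below are
# illustrative, not prescribed by this script:
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./beans_outputs \
#       --do_train --do_eval \
#       --learning_rate 2e-5 \
#       --num_train_epochs 5 \
#       --per_device_train_batch_size 8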
| 497
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")

    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            # split fused qkv projections into query/key/value for the HF model
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    '''simple docstring'''
    config = get_mobilevit_config(mobilevit_name)
    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location='cpu')
    # load 🤗 model
    if mobilevit_name.startswith('deeplabv3_'):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)
    logits = outputs.logits
    if mobilevit_name.startswith('deeplabv3_'):
        assert logits.shape == (1, 21, 32, 32)
        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ])
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ])
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")
        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)
        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model_mapping = {
            'mobilevit_s': 'mobilevit-small',
            'mobilevit_xs': 'mobilevit-x-small',
            'mobilevit_xxs': 'mobilevit-xx-small',
            'deeplabv3_mobilevit_s': 'deeplabv3-mobilevit-small',
            'deeplabv3_mobilevit_xs': 'deeplabv3-mobilevit-x-small',
            'deeplabv3_mobilevit_xxs': 'deeplabv3-mobilevit-xx-small',
        }
        print('Pushing to the hub...')
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization='apple')
        model.push_to_hub(model_name, organization='apple')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--mobilevit_name",
default="mobilevit_s",
type=str,
help=(
"Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
),
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
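# Example invocation (the script name and paths below are placeholders/assumptions):
#   python convert_mobilevit_checkpoint.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path /path/to/mobilevit_s.pt \
#       --pytorch_dump_folder_path /path/to/output \
#       --push_to_hub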
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
        ({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(i, i + 1) for i in range(10)]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    '''simple docstring'''
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    '''simple docstring'''
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    '''simple docstring'''
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
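# Minimal sketch of contiguous shard distribution consistent with the expected
# values in the parametrizations above. This is an illustrative re-implementation
# for intuition, not the actual internals of datasets.utils.sharding.
def _distribute_shards_sketch(num_shards, max_num_jobs):
    num_jobs = min(num_shards, max_num_jobs)
    sizes = [num_shards // num_jobs + int(i < num_shards % num_jobs) for i in range(num_jobs)]
    starts = [sum(sizes[:i]) for i in range(num_jobs)]
    return [range(start, start + size) for start, size in zip(starts, sizes)]

assert _distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]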
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    '''simple docstring'''
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        """simple docstring"""
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        """simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
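# Quick numeric check of the shortest-edge rule implemented above (values are
# illustrative): a 400x300 image with shortest_edge=18 keeps the aspect ratio
# and maps the shorter side to 18 pixels.
_w, _h, _shortest = 400, 300, 18
_expected = (_shortest, int(_shortest * _w / _h)) if _w > _h else (int(_shortest * _h / _w), _shortest)
assert _expected == (18, 24)  # (height, width)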
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        """simple docstring"""
        pass
    def test_call_pil(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        """simple docstring"""
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        """simple docstring"""
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
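# Sketch of the box convention verified above: DETR-style processors convert COCO
# [x, y, w, h] pixel boxes into normalized [cx, cy, w, h]. The numbers here are
# illustrative, not taken from the fixture annotations.
def _coco_to_normalized_cxcywh(box, img_w, img_h):
    x, y, w, h = box
    return [(x + w / 2) / img_w, (y + h / 2) / img_h, w / img_w, h / img_h]

assert _coco_to_normalized_cxcywh([0, 0, 320, 240], 640, 480) == [0.25, 0.25, 0.5, 0.5]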
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = 0
@slow
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
self.assertIsInstance(__magic_name__ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(__magic_name__ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
self.assertIsInstance(__magic_name__ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(__magic_name__ ) , 0 )
    def test_tokenizer_from_pretrained_identifier(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 1_2)
    def test_tokenizer_from_model_type(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 2_0)
    def test_tokenizer_from_tokenizer_class(self):
        """simple docstring"""
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 1_2)
    def test_tokenizer_from_type(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.txt""", os.path.join(tmp_dir, """vocab.txt"""))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="""bert""", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.json""", os.path.join(tmp_dir, """vocab.json"""))
            shutil.copy("""./tests/fixtures/merges.txt""", os.path.join(tmp_dir, """merges.txt"""))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="""gpt2""", use_fast=False)
            self.assertIsInstance(tokenizer, GPTaTokenizer)
    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.txt""", os.path.join(tmp_dir, """vocab.txt"""))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="""bert""")
            self.assertIsInstance(tokenizer, BertTokenizerFast)
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.json""", os.path.join(tmp_dir, """vocab.json"""))
            shutil.copy("""./tests/fixtures/merges.txt""", os.path.join(tmp_dir, """merges.txt"""))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="""gpt2""")
            self.assertIsInstance(tokenizer, GPTaTokenizerFast)
    def test_tokenizer_from_type_incorrect_name(self):
        """simple docstring"""
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("""./""", tokenizer_type="""xxx""")
@require_tokenizers
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCamelCase = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(__magic_name__ , (BertTokenizer, BertTokenizerFast) )
if isinstance(__magic_name__ , __magic_name__ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __magic_name__ )
else:
self.assertEqual(tokenizer.do_lower_case , __magic_name__ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__magic_name__ , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
UpperCamelCase = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = TOKENIZER_MAPPING.values()
UpperCamelCase = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__magic_name__ )
@require_tokenizers
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=__magic_name__ ) , __magic_name__ )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , __magic_name__ )
@require_tokenizers
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=__magic_name__ )
UpperCamelCase = """Hello, world. How are you?"""
UpperCamelCase = tokenizer.tokenize(__magic_name__ )
self.assertEqual("""[UNK]""" , tokens[0] )
UpperCamelCase = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=__magic_name__ )
UpperCamelCase = tokenizer.tokenize(__magic_name__ )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(__magic_name__ ) , __magic_name__ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__magic_name__ )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__magic_name__ , __magic_name__ )
    def test_get_tokenizer_config(self):
        """simple docstring"""
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("""bert-base-cased""")
        _ = config.pop("""_commit_hash""", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"""do_lower_case""": False})
        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})
        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)
        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["""tokenizer_class"""], """BertTokenizer""")
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
try:
AutoConfig.register("""custom""" , __magic_name__ )
AutoTokenizer.register(__magic_name__ , slow_tokenizer_class=__magic_name__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__magic_name__ ):
AutoTokenizer.register(__magic_name__ , slow_tokenizer_class=__magic_name__ )
UpperCamelCase = CustomTokenizer.from_pretrained(__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__magic_name__ )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
try:
AutoConfig.register("""custom""" , __magic_name__ )
# Can register in two steps
AutoTokenizer.register(__magic_name__ , slow_tokenizer_class=__magic_name__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__magic_name__ , fast_tokenizer_class=__magic_name__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__magic_name__ , slow_tokenizer_class=__magic_name__ , fast_tokenizer_class=__magic_name__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__magic_name__ ):
AutoTokenizer.register(__magic_name__ , fast_tokenizer_class=__magic_name__ )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = BertTokenizerFast.from_pretrained(__magic_name__ )
bert_tokenizer.save_pretrained(__magic_name__ )
UpperCamelCase = CustomTokenizerFast.from_pretrained(__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__magic_name__ )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ , use_fast=__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
with self.assertRaises(__magic_name__ ):
UpperCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__magic_name__ ):
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__magic_name__ )
UpperCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__magic_name__ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__magic_name__ )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ , trust_remote_code=__magic_name__ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__magic_name__ , use_fast=__magic_name__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__magic_name__ )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ , trust_remote_code=__magic_name__ , use_fast=__magic_name__ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
class UpperCAmelCase ( __snake_case ):
lowercase = False
class UpperCAmelCase ( __snake_case ):
lowercase = NewTokenizer
lowercase = False
try:
AutoConfig.register("""custom""" , __magic_name__ )
AutoTokenizer.register(__magic_name__ , slow_tokenizer_class=__magic_name__ )
AutoTokenizer.register(__magic_name__ , fast_tokenizer_class=__magic_name__ )
# If remote code is not set, the default is to use local
UpperCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
UpperCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=__magic_name__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__magic_name__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__magic_name__ , use_fast=__magic_name__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__magic_name__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__magic_name__ , use_fast=__magic_name__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=__magic_name__ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=__magic_name__ , use_fast=__magic_name__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
with self.assertRaisesRegex(
__magic_name__ , """bert-base is not a local folder and is not a valid model identifier""" ):
UpperCamelCase = AutoTokenizer.from_pretrained("""bert-base""" )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
with self.assertRaisesRegex(
__magic_name__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ , revision="""aaaaaa""" )
    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        """simple docstring"""
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """simple docstring"""

    def __init__(self):
        """simple docstring"""
        self.connections = {}

    def add_node(self, node: str):
        """simple docstring"""
        self.connections[node] = {}

    def add_transition_probability(self, nodea: str, nodeb: str, probability: float):
        """simple docstring"""
        if nodea not in self.connections:
            self.add_node(nodea)
        if nodeb not in self.connections:
            self.add_node(nodeb)
        self.connections[nodea][nodeb] = probability

    def get_nodes(self):
        """simple docstring"""
        return list(self.connections)

    def transition(self, node: str):
        """simple docstring"""
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start, transitions, steps) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()
    for nodea, nodeb, probability in transitions:
        graph.add_transition_probability(nodea, nodeb, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
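# Example random walk over the chain above (edge weights are illustrative
# assumptions; each node's outgoing probabilities sum to 1):
_transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
_visited = get_transitions("a", _transitions, 1000)
print(_visited.most_common())  # "a" should dominate the visit counts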
'''simple docstring'''
import math
def insertion_sort(array, start=0, end=0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array, index, heap_size) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)
def heap_sort(array) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array
def median_of_3(array, first_index, middle_index, last_index) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array, low, high, pivot) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
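# Quick sanity checks for the hybrid sort above (illustrative): small inputs fall
# through to insertion sort, large ones are partitioned (and heap-sorted if the
# recursion depth limit is hit).
assert sort([]) == []
assert sort([9, 1, 5, 3]) == [1, 3, 5, 9]
assert sort(list(range(100, 0, -1))) == list(range(1, 101))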
'''simple docstring'''
def kinetic_energy(mass: float, velocity: float) -> float:
    '''simple docstring'''
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative')
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
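# Example: KE = 0.5 * m * v^2, so a 10 kg body moving at 5 m/s carries 125 J.
assert kinetic_energy(10, 5) == 125.0
assert kinetic_energy(10, -5) == 125.0  # speed is squared, so the sign is irrelevant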
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@slow
    def test_image_classification(self) -> None:
        image_processor = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
        model = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
        model.to(torch_device)
        from datasets import load_dataset
        dataset = load_dataset("""nielsr/rvlcdip-demo""")
        image = dataset["""train"""][0]["""image"""].convert("""RGB""")
        inputs = image_processor(image, return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            """The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DPTImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self) -> None:
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["""scripts""", """test_script.py"""])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self) -> None:
        distributed_args = f'''
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        '''.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
# flake8: noqa
# Lint as: python3
__all__ = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__("doctest").testmod()
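# Example values (the Maclaurin series above converges quickly after angle reduction):
print(sin(90))   # ~1.0
print(sin(30))   # ~0.5
print(sin(270))  # ~-1.0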
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowercase : List[Any] = logging.get_logger(__name__)
class __magic_name__(BaseImageProcessor):
    model_input_names = ['''pixel_values''']

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PIL.Image.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"""height""": 256, """width""": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size, param_name="""crop_size""")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''')
        return resize(
            image, size=(size["""height"""], size["""width"""]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''')
        return center_crop(image, size=(size["""height"""], size["""width"""]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="""crop_size""")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")
        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""")
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""")
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
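# Usage sketch (illustrative): with the defaults above, the pipeline resizes to
# 256x256, center-crops to 224x224, rescales to [0, 1] and normalizes with the
# ImageNet standard statistics, so each output tensor has shape (3, 224, 224).
# processor = __magic_name__()
# batch = processor(images=pil_image, return_tensors="np")
# batch["pixel_values"][0].shape  # -> (3, 224, 224)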
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = F'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(msg)
    binary_stream = """""".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"""=""" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""""""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            """argument should be a bytes-like object or ASCII string, """
            F'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("""utf-8""")
        except UnicodeDecodeError:
            raise ValueError("""base64 encoded data should only contain ASCII characters""")
    padding = encoded_data.count("""=""")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = """""".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data)[: -padding * 2]
    else:
        binary_stream = """""".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data)
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod()
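# Round-trip example for the two functions above:
assert base64_encode(b"Hello") == b"SGVsbG8="
assert base64_decode("SGVsbG8=") == b"Hello"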
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        warnings.warn(
            """The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use PerceiverImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class DPRConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'dpr'

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, position_embedding_type="absolute", projection_dim: int = 0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
def __lowerCAmelCase ( snake_case : Optional[int] ) -> bool:
    __lowerCamelCase: List[str] = [int(i ) for i in ip_va_address.split(""".""" ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= octet <= 255 for octet in octets )
if __name__ == "__main__":
_A : Optional[Any] = input().strip()
_A : str = '''valid''' if is_ip_va_address_valid(ip) else '''invalid'''
print(F"""{ip} is a {valid_or_invalid} IP v4 address.""")
| 716
|
from sklearn.metrics import f1_score
import datasets
_A : Any = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
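# Working Example 1 below through the formula by hand: with
# references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0] and positive class 1,
# there is one true positive (index 3), one false positive (index 2) and one
# false negative (index 1), so precision = recall = 0.5 and
# F1 = 2 * (0.5 * 0.5) / (0.5 + 0.5) = 0.5, matching the doctest output.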
_A : Dict = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights. Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_A : Union[str, Any] = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : str=1 , SCREAMING_SNAKE_CASE_ : List[str]="binary" , SCREAMING_SNAKE_CASE_ : List[str]=None ):
        __lowerCamelCase: str = f1_score(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , pos_label=SCREAMING_SNAKE_CASE_ , average=SCREAMING_SNAKE_CASE_ , sample_weight=SCREAMING_SNAKE_CASE_ )
return {"f1": float(SCREAMING_SNAKE_CASE_ ) if score.size == 1 else score}
| 189
| 0
|
from math import isclose, sqrt
def A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = point_y / 4 / point_x
__snake_case = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
__snake_case = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
__snake_case = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
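    # Substituting y = m*(x - a) + b (with m = outgoing_gradient, a = point_x,
    # b = point_y) into y^2 + 4x^2 = 100 and collecting powers of x yields
    #     (m^2 + 4)*x^2 + 2*m*(b - m*a)*x + (b - m*a)^2 - 100 = 0,
    # which is exactly the quadratic, linear and constant terms computed below.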
__snake_case = outgoing_gradient**2 + 4
__snake_case = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
__snake_case = (point_y - outgoing_gradient * point_x) ** 2 - 100
__snake_case = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
__snake_case = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
__snake_case = x_minus if isclose(lowerCamelCase_ , lowerCamelCase_ ) else x_plus
__snake_case = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def A ( snake_case__ : float = 1.4 , snake_case__ : float = -9.6 ) -> Any:
'''simple docstring'''
__snake_case = 0
__snake_case = first_x_coord
__snake_case = first_y_coord
__snake_case = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
__snake_case = next_point(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 313
|
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: Optional[int] , lowerCamelCase_: Union[str, Any] ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
snake_case : List[str] = flax_key_tuple[:-1] + ("weight",)
snake_case : Union[str, Any] = torch.permute(lowerCamelCase_ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowerCamelCase_ ):
# linear layer
snake_case : int = flax_key_tuple[:-1] + ("weight",)
snake_case : Dict = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
snake_case : Any = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: Union[str, Any] , lowerCamelCase_: Union[str, Any] , lowerCamelCase_: str ):
"""simple docstring"""
if "metadata" in layer:
snake_case : Dict = layer.split("metadata" )
snake_case : Optional[Any] = "".join(split_layer[0] )[:-1]
snake_case : Any = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
snake_case : List[str] = layer.split("kvstore" )
snake_case : Tuple = "".join(split_layer[0] )[:-1]
snake_case : Union[str, Any] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
snake_case : List[Any] = layer.split("/" )
snake_case : Union[str, Any] = "/".join(split_layer[:-1] )
snake_case : int = (split_layer[-1],)
if "kvstore/path" in layer:
snake_case : str = f'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
snake_case : Tuple = "file"
else:
snake_case : int = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: Optional[Any] , lowerCamelCase_: Dict ):
"""simple docstring"""
snake_case : Optional[int] = rename_keys(lowerCamelCase_ )
snake_case : str = {}
for k, v in current_block.items():
snake_case : List[str] = v
snake_case : List[str] = new_current_block
torch.save(lowerCamelCase_ , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: Tuple , lowerCamelCase_: Optional[Any] , lowerCamelCase_: Dict , lowerCamelCase_: int , lowerCamelCase_: str = WEIGHTS_NAME ):
"""simple docstring"""
snake_case : List[str] = convert_file_size_to_int(lowerCamelCase_ )
snake_case : List[Any] = []
snake_case : Dict = {}
snake_case : str = 0
snake_case : List[str] = 0
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
snake_case : List[Any] = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
snake_case : Union[str, Any] = flatten_dict(lowerCamelCase_ , sep="/" )
snake_case : Optional[int] = {}
for layer in checkpoint_info.keys():
snake_case , snake_case , snake_case : Union[str, Any] = get_key_and_tensorstore_dict(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if curr_real_layer_name in all_layers:
snake_case : str = content
else:
snake_case : Any = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
snake_case : Optional[Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
snake_case : Tuple = torch.tensor(lowerCamelCase_ )
snake_case : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
snake_case , snake_case : Dict = rename_base_flax_keys(tuple(key.split("/" ) ) , lowerCamelCase_ )
snake_case : Union[str, Any] = "/".join(lowerCamelCase_ )
        # If this weight would tip the current shard over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
snake_case : str = os.path.join(
lowerCamelCase_ , weights_name.replace(".bin" , f'''-{len(lowerCamelCase_ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(lowerCamelCase_ , lowerCamelCase_ )
sharded_state_dicts.append(current_block.keys() )
del current_block
snake_case : Any = {}
snake_case : Union[str, Any] = 0
snake_case : Any = raw_weights.to(getattr(lowerCamelCase_ , lowerCamelCase_ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
snake_case : List[Any] = os.path.join(lowerCamelCase_ , weights_name.replace(".bin" , f'''-{len(lowerCamelCase_ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(lowerCamelCase_ , lowerCamelCase_ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(lowerCamelCase_ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
snake_case : List[Any] = {}
snake_case : Dict = {}
for idx, shard in enumerate(lowerCamelCase_ ):
snake_case : List[Any] = weights_name.replace(
".bin" , f'''-{idx+1:05d}-of-{len(lowerCamelCase_ ):05d}.bin''' ) # len(sharded_state_dicts):05d}
snake_case : Tuple = os.path.join(lowerCamelCase_ , weights_name.replace(".bin" , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
snake_case : Union[str, Any] = shard
for key in shard:
snake_case : List[Any] = shard_file
# Add the metadata
snake_case : Optional[int] = {"total_size": total_size}
snake_case : List[str] = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , "w" , encoding="utf-8" ) as f:
snake_case : Tuple = json.dumps(lowerCamelCase_ , indent=2 , sort_keys=lowerCamelCase_ ) + "\n"
f.write(lowerCamelCase_ )
return metadata, index
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
A = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
snake_case : List[Any] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
snake_case : List[Any] = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
    snake_case : Dict = T5Tokenizer.from_pretrained("t5-small" )
snake_case : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
snake_case : Dict = tokenizer(lowerCamelCase_ , return_tensors="pt" ).input_ids
snake_case : Optional[int] = model.generate(lowerCamelCase_ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
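# The splitting policy used by the sharding logic above, reduced to a
# self-contained sketch (a hypothetical helper, not part of this script):
# accumulate weights into the current shard until adding the next one would
# push it past max_shard_size, then flush and start a new shard.
def _split_into_shards(named_sizes, max_shard_size):
    shards, current, current_size = [], [], 0
    for name, size in named_sizes:
        if current and current_size + size > max_shard_size:
            shards.append(current)
            current, current_size = [], 0
        current.append(name)
        current_size += size
    if current:
        shards.append(current)
    return shards
# e.g. _split_into_shards([("a", 6), ("b", 5), ("c", 4)], 10) -> [["a"], ["b", "c"]]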
| 449
| 0
|
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
lowerCAmelCase__ : str = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[str]:
if got_ver is None or want_ver is None:
raise ValueError(
f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
f""" reinstalling {pkg}.""" )
if not ops[op](version.parse(__lowercase ) ,version.parse(__lowercase ) ):
raise ImportError(
f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase = None ) -> List[Any]:
UpperCAmelCase_ : int = f"""\n{hint}""" if hint is not None else ''
# non-versioned check
if re.match(r'^[\w_\-\d]+$' ,__lowercase ):
UpperCAmelCase_ : List[str] = requirement, None, None
else:
UpperCAmelCase_ : Optional[int] = re.findall(r'^([^!=<>\s]+)([\s!=<>]{1,2}.+)' ,__lowercase )
if not match:
raise ValueError(
                'requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'
f""" got {requirement}""" )
UpperCAmelCase_ : Tuple = match[0]
UpperCAmelCase_ : Optional[Any] = want_full.split(',' ) # there could be multiple requirements
UpperCAmelCase_ : Optional[int] = {}
for w in want_range:
        UpperCAmelCase_ : int = re.findall(r'^([\s!=<>]{1,2})(.+)' ,w )
if not match:
raise ValueError(
                'requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'
f""" but got {requirement}""" )
UpperCAmelCase_ : Any = match[0]
UpperCAmelCase_ : int = want_ver
if op not in ops:
raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
# special case
if pkg == "python":
        UpperCAmelCase_ : Union[str, Any] = '.'.join([str(x ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
return
# check if any version is installed
try:
UpperCAmelCase_ : Dict = importlib.metadata.version(__lowercase )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f"""The \'{requirement}\' distribution was not found and is required by this application. {hint}""" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> Dict:
UpperCAmelCase_ : int = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
return require_version(__lowercase ,__lowercase )
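# Typical calls to the checker defined above (illustrative pins only; the
# multi-clause form mirrors the examples transformers itself documents):
# require_version("numpy")                              # any installed version
# require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")  # ranged, multi-clause
# require_version("python>=3.7")                        # interpreter special case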
| 704
|
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class lowercase ( a_ ):
def __get__( self , _snake_case , _snake_case=None) -> List[Any]:
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('unreadable attribute')
UpperCAmelCase_ : str = '__cached_' + self.fget.__name__
UpperCAmelCase_ : Union[str, Any] = getattr(_snake_case , _snake_case , _snake_case)
if cached is None:
UpperCAmelCase_ : Union[str, Any] = self.fget(_snake_case)
setattr(_snake_case , _snake_case , _snake_case)
return cached
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f"""invalid truth value {val!r}""" )
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> str:
if is_torch_fx_proxy(UpperCamelCase ):
return True
if is_torch_available():
import torch
if isinstance(UpperCamelCase ,torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(UpperCamelCase ,tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(UpperCamelCase ,(jnp.ndarray, Tracer) ):
return True
return isinstance(UpperCamelCase ,np.ndarray )
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> Optional[Any]:
return isinstance(UpperCamelCase ,np.ndarray )
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> Union[str, Any]:
return _is_numpy(UpperCamelCase )
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> List[Any]:
import torch
return isinstance(UpperCamelCase ,torch.Tensor )
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> Optional[int]:
return False if not is_torch_available() else _is_torch(UpperCamelCase )
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> str:
import torch
return isinstance(UpperCamelCase ,torch.device )
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> str:
return False if not is_torch_available() else _is_torch_device(UpperCamelCase )
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> Optional[Any]:
import torch
if isinstance(UpperCamelCase ,UpperCamelCase ):
if hasattr(UpperCamelCase ,UpperCamelCase ):
UpperCAmelCase_ : Any = getattr(UpperCamelCase ,UpperCamelCase )
else:
return False
return isinstance(UpperCamelCase ,torch.dtype )
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> List[Any]:
return False if not is_torch_available() else _is_torch_dtype(UpperCamelCase )
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> List[Any]:
import tensorflow as tf
return isinstance(UpperCamelCase ,tf.Tensor )
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> Tuple:
return False if not is_tf_available() else _is_tensorflow(UpperCamelCase )
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> Tuple:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(UpperCamelCase ,'is_symbolic_tensor' ):
return tf.is_symbolic_tensor(UpperCamelCase )
return type(UpperCamelCase ) == tf.Tensor
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> List[Any]:
return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCamelCase )
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> Optional[Any]:
import jax.numpy as jnp # noqa: F811
return isinstance(UpperCamelCase ,jnp.ndarray )
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> Tuple:
return False if not is_flax_available() else _is_jax(UpperCamelCase )
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> Any:
if isinstance(UpperCamelCase ,(dict, UserDict) ):
        return {k: to_py_obj(v ) for k, v in obj.items()}
elif isinstance(UpperCamelCase ,(list, tuple) ):
        return [to_py_obj(o ) for o in obj]
elif is_tf_tensor(UpperCamelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(UpperCamelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(UpperCamelCase ):
return np.asarray(UpperCamelCase ).tolist()
elif isinstance(UpperCamelCase ,(np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> Optional[Any]:
if isinstance(UpperCamelCase ,(dict, UserDict) ):
        return {k: to_numpy(v ) for k, v in obj.items()}
elif isinstance(UpperCamelCase ,(list, tuple) ):
return np.array(UpperCamelCase )
elif is_tf_tensor(UpperCamelCase ):
return obj.numpy()
elif is_torch_tensor(UpperCamelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(UpperCamelCase ):
return np.asarray(UpperCamelCase )
else:
return obj
class lowercase ( a_ ):
def _snake_case ( self) -> Any:
UpperCAmelCase_ : Dict = fields(self)
# Safety and consistency checks
if not len(_snake_case):
raise ValueError(F"""{self.__class__.__name__} has no fields.""")
if not all(field.default is None for field in class_fields[1:]):
raise ValueError(F"""{self.__class__.__name__} should not have more than one required field.""")
UpperCAmelCase_ : int = getattr(self , class_fields[0].name)
UpperCAmelCase_ : Union[str, Any] = all(getattr(self , field.name) is None for field in class_fields[1:])
if other_fields_are_none and not is_tensor(_snake_case):
if isinstance(_snake_case , _snake_case):
UpperCAmelCase_ : str = first_field.items()
UpperCAmelCase_ : Any = True
else:
try:
UpperCAmelCase_ : str = iter(_snake_case)
UpperCAmelCase_ : List[str] = True
except TypeError:
UpperCAmelCase_ : Optional[int] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(_snake_case):
if (
not isinstance(_snake_case , (list, tuple))
or not len(_snake_case) == 2
or not isinstance(element[0] , _snake_case)
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
UpperCAmelCase_ : Optional[int] = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""")
break
setattr(self , element[0] , element[1])
if element[1] is not None:
UpperCAmelCase_ : Tuple = element[1]
elif first_field is not None:
UpperCAmelCase_ : Dict = first_field
else:
for field in class_fields:
UpperCAmelCase_ : Union[str, Any] = getattr(self , field.name)
if v is not None:
UpperCAmelCase_ : Union[str, Any] = v
def __delitem__( self , *_snake_case , **_snake_case) -> Optional[Any]:
raise Exception(F"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""")
def _snake_case ( self , *_snake_case , **_snake_case) -> Any:
raise Exception(F"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""")
def _snake_case ( self , *_snake_case , **_snake_case) -> str:
raise Exception(F"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""")
def _snake_case ( self , *_snake_case , **_snake_case) -> Any:
raise Exception(F"""You cannot use ``update`` on a {self.__class__.__name__} instance.""")
def __getitem__( self , _snake_case) -> Optional[Any]:
if isinstance(_snake_case , _snake_case):
UpperCAmelCase_ : Dict = dict(self.items())
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self , _snake_case , _snake_case) -> Optional[int]:
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(_snake_case , _snake_case)
super().__setattr__(_snake_case , _snake_case)
def __setitem__( self , _snake_case , _snake_case) -> List[Any]:
# Will raise a KeyException if needed
super().__setitem__(_snake_case , _snake_case)
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(_snake_case , _snake_case)
def _snake_case ( self) -> Tuple[Any]:
return tuple(self[k] for k in self.keys())
class lowercase ( a_, a_ ):
@classmethod
def _snake_case ( cls , _snake_case) -> List[str]:
raise ValueError(
F"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys())}""")
class lowercase ( a_ ):
_lowerCamelCase : Tuple= "longest"
_lowerCamelCase : Tuple= "max_length"
_lowerCamelCase : int= "do_not_pad"
class lowercase ( a_ ):
_lowerCamelCase : Union[str, Any]= "pt"
_lowerCamelCase : Any= "tf"
_lowerCamelCase : Dict= "np"
_lowerCamelCase : Tuple= "jax"
class lowercase :
def __init__( self , _snake_case) -> Tuple:
UpperCAmelCase_ : int = context_managers
UpperCAmelCase_ : Any = ExitStack()
def __enter__( self) -> Optional[Any]:
for context_manager in self.context_managers:
self.stack.enter_context(_snake_case)
def __exit__( self , *_snake_case , **_snake_case) -> List[Any]:
self.stack.__exit__(*_snake_case , **_snake_case)
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> int:
UpperCAmelCase_ : Dict = infer_framework(UpperCamelCase )
if framework == "tf":
UpperCAmelCase_ : Optional[int] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase_ : Tuple = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase_ : str = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = model_class.__name__
UpperCAmelCase_ : Optional[int] = infer_framework(UpperCamelCase )
if framework == "tf":
UpperCAmelCase_ : Any = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase_ : Optional[int] = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase_ : str = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase = "" ,UpperCamelCase = "." ) -> List[Any]:
def _flatten_dict(UpperCamelCase ,UpperCamelCase="" ,UpperCamelCase="." ):
for k, v in d.items():
            UpperCAmelCase_ : Any = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v ,MutableMapping ):
                yield from flatten_dict(v ,key ,delimiter=delimiter ).items()
else:
yield key, v
return dict(_flatten_dict(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) )
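# For example, flattening a nested mapping with the default "." delimiter
# (illustrative call; the outer function's obfuscated name stands in for
# flatten_dict here):
# flatten_dict({"a": {"b": 1, "c": {"d": 2}}}) -> {"a.b": 1, "a.c.d": 2}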
@contextmanager
def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase = False ) -> Any:
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase=None ) -> Optional[Any]:
if is_numpy_array(UpperCamelCase ):
return np.transpose(UpperCamelCase ,axes=UpperCamelCase )
elif is_torch_tensor(UpperCamelCase ):
return array.T if axes is None else array.permute(*UpperCamelCase )
elif is_tf_tensor(UpperCamelCase ):
import tensorflow as tf
return tf.transpose(UpperCamelCase ,perm=UpperCamelCase )
elif is_jax_tensor(UpperCamelCase ):
return jnp.transpose(UpperCamelCase ,axes=UpperCamelCase )
else:
raise ValueError(f"""Type not supported for transpose: {type(UpperCamelCase )}.""" )
def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase ) -> int:
if is_numpy_array(UpperCamelCase ):
return np.reshape(UpperCamelCase ,UpperCamelCase )
elif is_torch_tensor(UpperCamelCase ):
return array.reshape(*UpperCamelCase )
elif is_tf_tensor(UpperCamelCase ):
import tensorflow as tf
return tf.reshape(UpperCamelCase ,UpperCamelCase )
elif is_jax_tensor(UpperCamelCase ):
return jnp.reshape(UpperCamelCase ,UpperCamelCase )
else:
raise ValueError(f"""Type not supported for reshape: {type(UpperCamelCase )}.""" )
def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase=None ) -> Optional[int]:
if is_numpy_array(UpperCamelCase ):
return np.squeeze(UpperCamelCase ,axis=UpperCamelCase )
elif is_torch_tensor(UpperCamelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=UpperCamelCase )
elif is_tf_tensor(UpperCamelCase ):
import tensorflow as tf
return tf.squeeze(UpperCamelCase ,axis=UpperCamelCase )
elif is_jax_tensor(UpperCamelCase ):
return jnp.squeeze(UpperCamelCase ,axis=UpperCamelCase )
else:
raise ValueError(f"""Type not supported for squeeze: {type(UpperCamelCase )}.""" )
def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
if is_numpy_array(UpperCamelCase ):
return np.expand_dims(UpperCamelCase ,UpperCamelCase )
elif is_torch_tensor(UpperCamelCase ):
return array.unsqueeze(dim=UpperCamelCase )
elif is_tf_tensor(UpperCamelCase ):
import tensorflow as tf
return tf.expand_dims(UpperCamelCase ,axis=UpperCamelCase )
elif is_jax_tensor(UpperCamelCase ):
return jnp.expand_dims(UpperCamelCase ,axis=UpperCamelCase )
else:
raise ValueError(f"""Type not supported for expand_dims: {type(UpperCamelCase )}.""" )
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> Optional[Any]:
if is_numpy_array(UpperCamelCase ):
return np.size(UpperCamelCase )
elif is_torch_tensor(UpperCamelCase ):
return array.numel()
elif is_tf_tensor(UpperCamelCase ):
import tensorflow as tf
return tf.size(UpperCamelCase )
elif is_jax_tensor(UpperCamelCase ):
return array.size
else:
raise ValueError(f"""Type not supported for expand_dims: {type(UpperCamelCase )}.""" )
def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase ) -> Dict:
for key, value in auto_map.items():
if isinstance(UpperCamelCase ,(tuple, list) ):
UpperCAmelCase_ : Union[str, Any] = [f"""{repo_id}--{v}""" if (v is not None and '--' not in v) else v for v in value]
elif value is not None and "--" not in value:
UpperCAmelCase_ : Any = f"""{repo_id}--{value}"""
return auto_map
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> Union[str, Any]:
for base_class in inspect.getmro(UpperCamelCase ):
UpperCAmelCase_ : int = base_class.__module__
UpperCAmelCase_ : Dict = base_class.__name__
if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('torch' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f"""Could not infer framework from class {model_class}.""" )
| 471
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
_a = """visual_bert"""
def __init__( self , lowerCAmelCase=30_522 , lowerCAmelCase=768 , lowerCAmelCase=512 , lowerCAmelCase=12 , lowerCAmelCase=12 , lowerCAmelCase=3_072 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=512 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1e-12 , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , **lowerCAmelCase , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
_lowercase =vocab_size
_lowercase =max_position_embeddings
_lowercase =hidden_size
_lowercase =visual_embedding_dim
_lowercase =num_hidden_layers
_lowercase =num_attention_heads
_lowercase =intermediate_size
_lowercase =hidden_act
_lowercase =hidden_dropout_prob
_lowercase =attention_probs_dropout_prob
_lowercase =initializer_range
_lowercase =type_vocab_size
_lowercase =layer_norm_eps
_lowercase =bypass_transformer
_lowercase =special_visual_initialize
| 291
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class __lowerCAmelCase :
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = True , lowerCAmelCase = False ) -> str:
'''simple docstring'''
_lowercase =scheduler
_lowercase =optimizers if isinstance(lowerCAmelCase , (list, tuple) ) else [optimizers]
_lowercase =split_batches
_lowercase =step_with_optimizer
_lowercase =GradientState()
def A__ ( self , *lowerCAmelCase , **lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*lowerCAmelCase , **lowerCAmelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*lowerCAmelCase , **lowerCAmelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
_lowercase =AcceleratorState().num_processes
for _ in range(lowerCAmelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , 'total_steps' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*lowerCAmelCase , **lowerCAmelCase )
else:
self.scheduler.step(*lowerCAmelCase , **lowerCAmelCase )
def A__ ( self ) -> str:
'''simple docstring'''
return self.scheduler.get_last_lr()
def A__ ( self ) -> Tuple:
'''simple docstring'''
return self.scheduler.state_dict()
def A__ ( self , lowerCAmelCase ) -> str:
'''simple docstring'''
self.scheduler.load_state_dict(lowerCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
return self.scheduler.get_lr()
def A__ ( self , *lowerCAmelCase , **lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
return self.scheduler.print_lr(*lowerCAmelCase , **lowerCAmelCase )
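# A minimal usage sketch for the wrapper above (a sketch only: in accelerate
# this class is AcceleratedScheduler, obfuscated here, and it assumes an
# Accelerator has been set up so AcceleratorState/GradientState are populated):
#
#     import torch
#     model = torch.nn.Linear(2, 2)
#     opt = torch.optim.SGD(model.parameters(), lr=0.1)
#     sched = torch.optim.lr_scheduler.StepLR(opt, step_size=10)
#     wrapped = AcceleratedScheduler(sched, opt)
#     wrapped.step()  # steps the inner scheduler once per real optimizer step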
| 291
| 1
|
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
lowerCAmelCase_ = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
lowerCAmelCase_ = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
lowerCAmelCase_ = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
lowerCAmelCase_ = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
lowerCAmelCase_ = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any:
for tf_name, hf_name in patterns:
        _SCREAMING_SNAKE_CASE : Any = k.replace(tf_name , hf_name )
return k
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]:
_SCREAMING_SNAKE_CASE : str = BigBirdPegasusConfig(**_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Dict = BigBirdPegasusForConditionalGeneration(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[str] = torch_model.state_dict()
_SCREAMING_SNAKE_CASE : int = {}
# separating decoder weights
_SCREAMING_SNAKE_CASE : Tuple = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )}
_SCREAMING_SNAKE_CASE : Optional[int] = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )}
for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ):
        _SCREAMING_SNAKE_CASE : Tuple = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(_SCREAMING_SNAKE_CASE ):
continue
_SCREAMING_SNAKE_CASE : Union[str, Any] = DECODER_PATTERNS
_SCREAMING_SNAKE_CASE : List[Any] = rename_state_dict_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if new_k not in state_dict:
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(i in k for i in ["""dense""", """query""", """key""", """value"""] ):
_SCREAMING_SNAKE_CASE : str = v.T
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE )
assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ):
        _SCREAMING_SNAKE_CASE : Optional[Any] = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(_SCREAMING_SNAKE_CASE ):
continue
_SCREAMING_SNAKE_CASE : int = REMAINING_PATTERNS
_SCREAMING_SNAKE_CASE : int = rename_state_dict_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(i in k for i in ["""dense""", """query""", """key""", """value"""] ):
_SCREAMING_SNAKE_CASE : Any = v.T
_SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(_SCREAMING_SNAKE_CASE )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
_SCREAMING_SNAKE_CASE : str = mapping["""model.embed_positions.weight"""]
_SCREAMING_SNAKE_CASE : Tuple = mapping.pop("""model.embed_positions.weight""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = torch_model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Any = [
k
for k in missing
if k
not in [
"""final_logits_bias""",
"""model.encoder.embed_tokens.weight""",
"""model.decoder.embed_tokens.weight""",
"""lm_head.weight""",
]
]
assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], F"""no matches found for the following tf keys {extra}"""
return torch_model
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Tuple:
_SCREAMING_SNAKE_CASE : List[Any] = tf.train.list_variables(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Any = {}
_SCREAMING_SNAKE_CASE : int = ["""global_step"""]
for name, shape in tqdm(_SCREAMING_SNAKE_CASE , desc="""converting tf checkpoint to dict""" ):
_SCREAMING_SNAKE_CASE : Any = any(pat in name for pat in ignore_name )
if skip_key:
continue
_SCREAMING_SNAKE_CASE : Union[str, Any] = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Any = array
return tf_weights
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]:
_SCREAMING_SNAKE_CASE : List[Any] = get_tf_weights_as_numpy(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Union[str, Any] = convert_bigbird_pegasus(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
torch_model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 707
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
_SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
_SCREAMING_SNAKE_CASE : Any = -1
_SCREAMING_SNAKE_CASE : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
_SCREAMING_SNAKE_CASE : Dict = model.generate(_A , max_new_tokens=1_0 , do_sample=_A)
_SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(greedy_ids[0])
with CaptureStdout() as cs:
_SCREAMING_SNAKE_CASE : Any = TextStreamer(_A)
model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_SCREAMING_SNAKE_CASE : str = cs.out[:-1]
self.assertEqual(_A , _A)
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
_SCREAMING_SNAKE_CASE : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
_SCREAMING_SNAKE_CASE : List[Any] = -1
_SCREAMING_SNAKE_CASE : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
_SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A)
_SCREAMING_SNAKE_CASE : Any = tokenizer.decode(greedy_ids[0])
_SCREAMING_SNAKE_CASE : List[Any] = TextIteratorStreamer(_A)
_SCREAMING_SNAKE_CASE : Any = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer}
_SCREAMING_SNAKE_CASE : List[Any] = Thread(target=model.generate , kwargs=_A)
thread.start()
_SCREAMING_SNAKE_CASE : Any = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_A , _A)
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
_SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
_SCREAMING_SNAKE_CASE : Any = -1
_SCREAMING_SNAKE_CASE : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
_SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A)
_SCREAMING_SNAKE_CASE : str = greedy_ids[:, input_ids.shape[1] :]
_SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(new_greedy_ids[0])
with CaptureStdout() as cs:
_SCREAMING_SNAKE_CASE : Any = TextStreamer(_A , skip_prompt=_A)
model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_SCREAMING_SNAKE_CASE : Optional[int] = cs.out[:-1]
self.assertEqual(_A , _A)
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""distilgpt2""")
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""").to(_A)
_SCREAMING_SNAKE_CASE : int = -1
_SCREAMING_SNAKE_CASE : List[str] = torch.ones((1, 5) , device=_A).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_SCREAMING_SNAKE_CASE : Optional[int] = TextStreamer(_A , skip_special_tokens=_A)
model.generate(_A , max_new_tokens=1 , do_sample=_A , streamer=_A)
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_SCREAMING_SNAKE_CASE : Optional[Any] = cs.out[:-1] # Remove the final "\n"
_SCREAMING_SNAKE_CASE : Tuple = tokenizer(_A , return_tensors="""pt""")
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1))
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
_SCREAMING_SNAKE_CASE : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
_SCREAMING_SNAKE_CASE : Tuple = -1
_SCREAMING_SNAKE_CASE : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
_SCREAMING_SNAKE_CASE : int = TextIteratorStreamer(_A , timeout=0.001)
_SCREAMING_SNAKE_CASE : List[Any] = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer}
_SCREAMING_SNAKE_CASE : List[str] = Thread(target=model.generate , kwargs=_A)
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_A):
_SCREAMING_SNAKE_CASE : str = """"""
for new_text in streamer:
streamer_text += new_text
| 635
| 0
|
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE ) ->bool:
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ):
lowerCAmelCase__ :List[Any] = F"Input value of [number={number}] must be an integer"
raise TypeError(__snake_case )
if number < 0:
return False
lowerCAmelCase__ :Any = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
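# The predicate above identifies automorphic numbers, whose squares end in the
# number itself: 5 -> 25, 6 -> 36, 76 -> 5776 and 376 -> 141376 all return
# True, while e.g. 7 -> 49 returns False.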
| 93
|
'''simple docstring'''
import functools
def a_ ( __snake_case : str , __snake_case : str ) -> int:
"""simple docstring"""
    lowerCamelCase_ =len(worda )
    lowerCamelCase_ =len(wordb )
    @functools.cache
    def min_distance(__snake_case : int , __snake_case : int ) -> int:
        # if the first word's index overflows - delete the rest of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word's index overflows - delete the rest of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        lowerCamelCase_ =int(worda[indexa] != wordb[indexb] ) # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , indexb ) , 1 + min_distance(indexa , indexb + 1 ) , diff + min_distance(indexa + 1 , indexb + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
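# For instance, transforming "kitten" into "sitting" takes 3 edits
# (substitute k -> s, substitute e -> i, append g), so the function above
# returns 3 for that pair.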
| 676
| 0
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
A = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict, old, new) -> None:
    """Move the tensor stored under key `old` to key `new` in the state dict."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict) -> OrderedDict:
    """Rename the backbone keys to the HuggingFace convolutional encoder convention."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False) -> None:
    """Split each encoder layer's fused in_proj matrix/bias into separate q/k/v projections."""
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
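# A minimal sketch of the slicing performed above (illustrative only; 256 is the
# encoder hidden size, so the fused in_proj matrix stacks the query, key and value
# projections along dim 0):
#
#   fused_w = torch.randn(3 * 256, 256)   # stands in for in_proj_weight
#   q_w, k_w, v_w = fused_w[:256, :], fused_w[256:512, :], fused_w[-256:, :]
#   assert q_w.shape == k_w.shape == v_w.shape == (256, 256)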
def prepare_img() -> "Image.Image":
    """Download the COCO image on which we'll verify the conversion results."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights into our Conditional DETR structure."""
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
A = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
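# Example invocation (a sketch: the script filename and output folder are illustrative,
# and the run needs network access for the torch hub checkpoint and the hub push):
#
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50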
| 705
|
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
A = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to SortishSamler or not.'} )
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    adafactor: bool = field(default=False, metadata={'help': 'whether to use adafactor'} )
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
    dropout: Optional[float] = field(default=None, metadata={'help': 'Dropout probability. Goes into model.config.'} )
    attention_dropout: Optional[float] = field(
        default=None, metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
    lr_scheduler: Optional[str] = field(
        default='linear' , metadata={'help': f'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'} , )
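# A minimal usage sketch (HfArgumentParser is the standard way these argument
# dataclasses are consumed in the example scripts):
#
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser(Seq2SeqTrainingArguments)
#   training_args = parser.parse_args_into_dataclasses()[0]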
| 277
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
        'results': {'train_runtime': 650, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
        'results': {'train_runtime': 600, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class _lowerCAmelCase ( unittest.TestCase ):
    def setUp(self) -> None:
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count=1) -> "HuggingFace":
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name) -> None:
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    def test_glue(self) -> None:
        # create estimator
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 258
|
"""simple docstring"""
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """Solve a 2x2 system of linear equations, each given as [a, b, c] for ax + by = c."""
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError('Please enter a valid equation.')
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.')
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)')
        else:
            raise ValueError('No solution. (Inconsistent system)')
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Consistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
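# Worked example: the system 2x + 3y = 7 and x - y = 1 has determinant
# 2 * (-1) - 1 * 3 = -5, determinant_x = 7 * (-1) - 1 * 3 = -10 and
# determinant_y = 2 * 1 - 1 * 7 = -5, so the intersection is (2.0, 1.0):
#
#   assert cramers_rule_2x2([2, 3, 7], [1, -1, 1]) == (2.0, 1.0)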
| 599
| 0
|
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
snake_case : Any = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 48_000,
'sample_size': 65_536,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 48_000,
'sample_size': 65_536,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 48_000,
'sample_size': 131_072,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Convert the (alpha, sigma) noise parametrization to a diffusion time t."""
    return torch.atan2(sigma, alpha) / math.pi * 2
def get_crash_schedule(t):
    """Map a linear time schedule onto the 'crash' schedule used by the original model."""
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class Object(object):
    pass
class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    """Fetch the original checkpoint for `model_name` from its MODELS_MAP url."""
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
snake_case : Dict = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
snake_case : Optional[int] = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
snake_case : str = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
snake_case : Any = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
snake_case : Tuple = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
snake_case : int = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def convert_resconv_naming(name):
    """Rename a ResConvBlock sub-module using RES_CONV_MAP."""
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])
    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    """Rename an attention sub-module using ATTN_MAP (may map one key to several)."""
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    """Translate an original checkpoint key into the diffusers UNet1D naming scheme."""
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")
    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]
    while string.startswith("main.7."):
        depth += 1
        string = string[7:]
    if string.startswith("main."):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"
    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")
    string_left = string_left[1:]
    if "resnets" in new_layer:
        new_string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
    string_left = new_string_left
    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    """Rename all keys of the original state dict into the diffusers naming scheme."""
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    """Split a fused conv qkv tensor into separate linear q/k/v entries."""
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)
    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)
    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict)
    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)
    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)
    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)
    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios
    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)
    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
snake_case : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
snake_case : int = parser.parse_args()
main(args)
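# Example invocation (a sketch: the script filename and output path are illustrative;
# passing an official model name makes the script download the checkpoint itself):
#
#   python convert_dance_diffusion_to_diffusers.py \
#       --model_path gwf-440k \
#       --checkpoint_path ./gwf-440k-diffusers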
| 339
|
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 339
| 1
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class a__ ( unittest.TestCase ):
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
debug_launcher(test_script.main )
def lowerCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
debug_launcher(test_ops.main )
| 423
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
A_ : List[str] = logging.get_logger(__name__)
A_ : List[Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
A_ : str = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    """Read a label file with one label per line into a {line_number: label} dict."""
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
A_ : List[Any] = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()
    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True, )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)
    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])} )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
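# Example invocation for a fine-tuned checkpoint (a sketch; all paths are illustrative,
# not prescribed by the script):
#
#   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./wav2vec2_ft.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h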
| 456
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input():
    compute_environment = _ask_options(
        'In which compute environment are you running?', ['This machine', 'AWS (Amazon SageMaker)'], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('config', description=description)
    else:
        parser = argparse.ArgumentParser('Accelerate config command', description=description)
    parser.add_argument(
        '--config_file', default=None, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith('.json'):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(F"""accelerate configuration saved at {config_file}""")
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
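# Typical usage (a sketch): run the interactive questionnaire and write the answers to
# the default cache location, or to an explicit file:
#
#   accelerate config
#   accelerate config --config_file ./my_config.yaml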
| 669
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and an XLM-Roberta tokenizer into a single processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names) )
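# A minimal usage sketch (the checkpoint name is illustrative, not implied by this file;
# the class name above was inferred from the CLIP image processor + XLM-Roberta pairing):
#
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")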
| 669
| 1
|
"""simple docstring"""
import os
def solution():
    """Return the greatest product of four adjacent numbers (in any direction) in the grid."""
    with open(os.path.dirname(__file__) + """/grid.txt""" ) as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])
        maximum = 0
        # right
        for i in range(20):
            for j in range(17):
                temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
                if temp > maximum:
                    maximum = temp
        # down
        for i in range(17):
            for j in range(20):
                temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
                if temp > maximum:
                    maximum = temp
        # diagonal 1
        for i in range(17):
            for j in range(17):
                temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
                if temp > maximum:
                    maximum = temp
        # diagonal 2
        for i in range(17):
            for j in range(3, 20):
                temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
                if temp > maximum:
                    maximum = temp
        return maximum
if __name__ == "__main__":
print(solution())
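# Sanity note: for the Project Euler problem 11 grid shipped as grid.txt, the four
# scans above (right, down and both diagonals) yield a maximum product of 70600674.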
| 58
|
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` significant digits using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("""Undefined for non-integers""")
    elif precision < 1:
        raise ValueError("""Undefined for non-natural numbers""")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
print(F'''The first {n} digits of pi is: {pi(n)}''')
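# Sanity check: the first digits of pi are 3.14159265358979..., so pi(15) should
# return "3.1415926535897" (the trailing digit is dropped by the [:-1] slice above).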
| 58
| 1
|
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Tuple = logging.get_logger(__name__)
snake_case__ : Any = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class EncodecConfig(PretrainedConfig):
    """Configuration class for the EnCodec neural audio codec."""
    model_type = "encodec"
    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                F"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" )
        super().__init__(**kwargs)
    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)
    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    @property
    def frame_rate(self):
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)
    @property
    def num_quantizers(self):
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
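# Worked example for the defaults above: hop_length = 8 * 5 * 4 * 2 = 320 samples,
# so frame_rate = ceil(24000 / 320) = 75 frames per second, and with the largest
# target bandwidth of 24.0 kbps, num_quantizers = 1000 * 24.0 // (75 * 10) = 32.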
| 700
|
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
_UpperCAmelCase =tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
_UpperCAmelCase =tf.convert_to_tensor(
[8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.floataa , ) # expected non filtered values as noted above
_UpperCAmelCase =tf_top_k_top_p_filtering(_snake_case , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
_UpperCAmelCase =output[output != -float("inf" )]
_UpperCAmelCase =tf.cast(
tf.where(tf.not_equal(_snake_case , tf.constant(-float("inf" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_snake_case , _snake_case , rtol=1E-1_2 )
tf.debugging.assert_equal(_snake_case , _snake_case )
@require_tf
class _a ( unittest.TestCase , A__ ):
"""simple docstring"""
if is_tf_available():
snake_case ={
"""AutoModelForCausalLM""": TFAutoModelForCausalLM,
"""AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
"""AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
"""AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
def SCREAMING_SNAKE_CASE ( self ):
# TF-only test: tf.saved_model export
_UpperCAmelCase =TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCAmelCase =2
_UpperCAmelCase =2
        class DummyModel(tf.Module):
"""simple docstring"""
def __init__( self , _snake_case ):
super(_snake_case , self ).__init__()
_UpperCAmelCase =model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask" ),
) , jit_compile=_snake_case , )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case ):
_UpperCAmelCase =self.model.generate(
input_ids=_snake_case , attention_mask=_snake_case , max_new_tokens=_snake_case , return_dict_in_generate=_snake_case , )
return {"sequences": outputs["sequences"]}
_UpperCAmelCase =[[2, 0], [102, 103]]
_UpperCAmelCase =[[1, 0], [1, 1]]
_UpperCAmelCase =DummyModel(model=_snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_snake_case , _snake_case , signatures={"serving_default": dummy_model.serving} )
_UpperCAmelCase =tf.saved_model.load(_snake_case ).signatures["serving_default"]
for batch_size in range(1 , len(_snake_case ) + 1 ):
_UpperCAmelCase ={
"input_ids": tf.constant(dummy_input_ids[:batch_size] ),
"attention_mask": tf.constant(dummy_attention_masks[:batch_size] ),
}
_UpperCAmelCase =serving_func(**_snake_case )["sequences"]
_UpperCAmelCase =test_model.generate(**_snake_case , max_new_tokens=_snake_case )
tf.debugging.assert_equal(_snake_case , _snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self ):
# TF-only test: tf.saved_model export
_UpperCAmelCase =TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCAmelCase =1
_UpperCAmelCase =2
        class DummyModel(tf.Module):
"""simple docstring"""
def __init__( self , _snake_case ):
super(_snake_case , self ).__init__()
_UpperCAmelCase =model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="input_ids" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="attention_mask" ),
) , jit_compile=_snake_case , )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case ):
_UpperCAmelCase =self.model.generate(
input_ids=_snake_case , attention_mask=_snake_case , max_new_tokens=_snake_case , return_dict_in_generate=_snake_case , )
return {"sequences": outputs["sequences"]}
_UpperCAmelCase =[[2], [102, 103]]
_UpperCAmelCase =[[1], [1, 1]]
_UpperCAmelCase =DummyModel(model=_snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_snake_case , _snake_case , signatures={"serving_default": dummy_model.serving} )
_UpperCAmelCase =tf.saved_model.load(_snake_case ).signatures["serving_default"]
for input_row in range(len(_snake_case ) ):
_UpperCAmelCase ={
"input_ids": tf.constant([dummy_input_ids[input_row]] ),
"attention_mask": tf.constant([dummy_attention_masks[input_row]] ),
}
_UpperCAmelCase =serving_func(**_snake_case )["sequences"]
_UpperCAmelCase =test_model.generate(**_snake_case , max_new_tokens=_snake_case )
tf.debugging.assert_equal(_snake_case , _snake_case )
@slow
@require_tensorflow_text
def SCREAMING_SNAKE_CASE ( self ):
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=_snake_case )
        class CompleteSentenceTransformer(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self ):
super().__init__()
_UpperCAmelCase =text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_snake_case , "spiece.model" ) , "rb" ).read() )
_UpperCAmelCase =TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" )
def SCREAMING_SNAKE_CASE ( self , _snake_case , *_snake_case , **_snake_case ):
_UpperCAmelCase =self.tokenizer.tokenize(_snake_case )
_UpperCAmelCase , _UpperCAmelCase =text.pad_model_inputs(
_snake_case , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
_UpperCAmelCase =self.model.generate(input_ids=_snake_case , attention_mask=_snake_case )
return self.tokenizer.detokenize(_snake_case )
_UpperCAmelCase =CompleteSentenceTransformer()
_UpperCAmelCase =tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" )
_UpperCAmelCase =complete_model(_snake_case )
_UpperCAmelCase =tf.keras.Model(_snake_case , _snake_case )
keras_model.save(_snake_case )
def SCREAMING_SNAKE_CASE ( self ):
# Has PT equivalent: this test relies on random sampling
_UpperCAmelCase ={
"do_sample": True,
"num_beams": 1,
"top_p": 0.7,
"top_k": 10,
"temperature": 0.7,
}
_UpperCAmelCase =14
_UpperCAmelCase =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCAmelCase ="Hello, my dog is cute and"
_UpperCAmelCase =tokenizer(_snake_case , return_tensors="tf" )
_UpperCAmelCase =TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCAmelCase =638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
_UpperCAmelCase =model.generate(**_snake_case , eos_token_id=_snake_case , **_snake_case )
self.assertTrue(expectation == len(generated_tokens[0] ) )
_UpperCAmelCase =[638, 198]
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
_UpperCAmelCase =model.generate(**_snake_case , eos_token_id=_snake_case , **_snake_case )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def SCREAMING_SNAKE_CASE ( self ):
# Has PT equivalent: ample use of framework-specific code
_UpperCAmelCase =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" )
_UpperCAmelCase ="Hugging Face is a technology company based in New York and Paris."
_UpperCAmelCase =bart_tokenizer(_snake_case , return_tensors="tf" ).input_ids
_UpperCAmelCase =TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" )
_UpperCAmelCase =bart_model.generate(_snake_case ).numpy()
        class FakeBart(TFBartForConditionalGeneration):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case=None , **_snake_case ):
return super().call(_snake_case , **_snake_case )
_UpperCAmelCase =FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" )
_UpperCAmelCase =bart_model.generate(_snake_case , foo="bar" ).numpy()
self.assertTrue(np.array_equal(_snake_case , _snake_case ) )
        class FakeEncoder(bart_model.model.encoder.__class__):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self , _snake_case , **_snake_case ):
return super().call(_snake_case , **_snake_case )
_UpperCAmelCase =FakeEncoder(bart_model.config , bart_model.model.shared )
_UpperCAmelCase =fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
_UpperCAmelCase =bart_model.generate(_snake_case ).numpy()
with self.assertRaises(_snake_case ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_snake_case , foo="bar" )
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. " "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model


def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def get_keys_to_not_convert(model):
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names


def has_4bit_bnb_layers(model):
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter: nn.Module):
    return next(parameter.parameters()).device


def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
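
# Hedged usage sketch for the entry point above: quantize an empty-weight model, then
# load shards from disk. The checkpoint path is a placeholder, and `transformers` is
# only needed for this demo, not by the module itself.
def _demo_load_and_quantize():
    from transformers import AutoConfig, AutoModelForCausalLM

    with init_empty_weights():
        empty_model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained("gpt2"))
    bnb_config = BnbQuantizationConfig(load_in_8bit=True)
    # weights_location must be a folder holding the model's checkpoint files
    return load_and_quantize_model(empty_model, bnb_config, weights_location="path/to/weights")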
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
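
# Hedged usage sketch: build the adjacency matrix in code instead of via stdin and
# run the solver defined above.
def _demo_floyd_warshall():
    inf = float("inf")
    graph = [
        [0.0, 2.0, inf],
        [inf, 0.0, 1.0],
        [inf, inf, 0.0],
    ]
    dist, _ = floyd_warshall(graph, 3)  # also prints the shortest-path matrix
    return dist  # dist[0][2] == 3.0 via the path 0 -> 1 -> 2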
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
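
# Hedged usage sketch: the loader above JIT-compiles the extension on first call,
# which needs a CUDA toolchain; callers typically guard it so CPU-only imports work.
def _demo_load_kernels():
    try:
        return load_cuda_kernels()
    except Exception as exc:  # e.g. missing nvcc or CUDA headers
        print(f"CUDA kernels unavailable, falling back to the PyTorch implementation: {exc}")
        return None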
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , projection_dim=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=3_7)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
        input_ids = tf.constant(
            [[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
0.03236253,
0.12753335,
0.16818509,
0.00279786,
0.3896933,
0.24264945,
0.2178971,
-0.02335227,
-0.08481959,
-0.14324117,
]
] )
self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1E-4 ) )
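
# Hedged usage sketch mirroring the integration test above: embedding a question with
# DPR. Requires network access; `DPRQuestionEncoderTokenizer` is the matching tokenizer.
def _demo_dpr_question_embedding():
    from transformers import DPRQuestionEncoderTokenizer

    tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    inputs = tokenizer("hello, is my dog cute?", return_tensors="tf")
    return model(**inputs).pooler_output  # shape (1, 768)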
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data
    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
            self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()


def get_from_cache(url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent=None, local_files_only=False):
'''simple docstring'''
if cache_dir is None:
lowerCamelCase__ = TRANSFORMERS_CACHE
if isinstance(A__ ,A__ ):
lowerCamelCase__ = str(A__ )
os.makedirs(A__ ,exist_ok=A__ )
lowerCamelCase__ = None
if not local_files_only:
try:
lowerCamelCase__ = requests.head(A__ ,allow_redirects=A__ ,proxies=A__ ,timeout=A__ )
if response.status_code == 200:
lowerCamelCase__ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
lowerCamelCase__ = url_to_filename(A__ ,A__ )
# get cache path to put the file
lowerCamelCase__ = os.path.join(A__ ,A__ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(A__ ):
return cache_path
else:
lowerCamelCase__ = [
file
for file in fnmatch.filter(os.listdir(A__ ) ,filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(A__ ) > 0:
return os.path.join(A__ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(A__ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lowerCamelCase__ = cache_path + '.lock'
with FileLock(A__ ):
# If the download just completed while the lock was activated.
if os.path.exists(A__ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
lowerCamelCase__ = cache_path + '.incomplete'
@contextmanager
def _resumable_file_manager():
with open(A__ ,'''a+b''' ) as f:
yield f
lowerCamelCase__ = _resumable_file_manager
if os.path.exists(A__ ):
lowerCamelCase__ = os.stat(A__ ).st_size
else:
lowerCamelCase__ = 0
else:
lowerCamelCase__ = partial(tempfile.NamedTemporaryFile ,dir=A__ ,delete=A__ )
lowerCamelCase__ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' ,A__ ,temp_file.name ,)
http_get(
A__ ,A__ ,proxies=A__ ,resume_size=A__ ,user_agent=A__ ,)
os.replace(temp_file.name ,A__ )
lowerCamelCase__ = {'url': url, 'etag': etag}
lowerCamelCase__ = cache_path + '.json'
with open(A__ ,'''w''' ) as meta_file:
json.dump(A__ ,A__ )
return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"
    return filename
def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent=None, extract_compressed_file=False, force_extract=False, local_files_only=False):
'''simple docstring'''
if cache_dir is None:
lowerCamelCase__ = TRANSFORMERS_CACHE
if isinstance(A__ ,A__ ):
lowerCamelCase__ = str(A__ )
if isinstance(A__ ,A__ ):
lowerCamelCase__ = str(A__ )
if is_remote_url(A__ ):
# URL, so get it from the cache (downloading if necessary)
lowerCamelCase__ = get_from_cache(
A__ ,cache_dir=A__ ,force_download=A__ ,proxies=A__ ,resume_download=A__ ,user_agent=A__ ,local_files_only=A__ ,)
elif os.path.exists(A__ ):
# File, and it exists.
lowerCamelCase__ = url_or_filename
elif urlparse(A__ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(A__ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(A__ ) )
if extract_compressed_file:
if not is_zipfile(A__ ) and not tarfile.is_tarfile(A__ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
lowerCamelCase__ = os.path.split(A__ )
lowerCamelCase__ = output_file.replace('''.''' ,'''-''' ) + '-extracted'
lowerCamelCase__ = os.path.join(A__ ,A__ )
if os.path.isdir(A__ ) and os.listdir(A__ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lowerCamelCase__ = output_path + '.lock'
with FileLock(A__ ):
shutil.rmtree(A__ ,ignore_errors=A__ )
os.makedirs(A__ )
if is_zipfile(A__ ):
with ZipFile(A__ ,'''r''' ) as zip_file:
zip_file.extractall(A__ )
zip_file.close()
elif tarfile.is_tarfile(A__ ):
lowerCamelCase__ = tarfile.open(A__ )
tar_file.extractall(A__ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(A__ ) )
return output_path_extracted
return output_path
def lowerCAmelCase__(__snake_case ,__snake_case="," ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(A__ ,A__ )
if os.path.isfile(A__ ):
with open(A__ ) as f:
lowerCamelCase__ = eval(f.read() )
else:
lowerCamelCase__ = requests.get(A__ )
try:
lowerCamelCase__ = requests.json()
except Exception:
lowerCamelCase__ = req.content.decode()
assert data is not None, "could not connect"
try:
lowerCamelCase__ = eval(A__ )
except Exception:
lowerCamelCase__ = data.split('''\n''' )
req.close()
return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
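
# Hedged usage sketch for the I/O helpers above: `chunk` batches any sequence lazily
# and `img_tensorize` accepts a local path or a URL. The file names are placeholders.
def _demo_image_batches():
    images = ["a.jpg", "b.jpg", "c.jpg", "d.jpg"]
    batches = list(chunk(images, batch=2))  # [["a.jpg", "b.jpg"], ["c.jpg", "d.jpg"]]
    # img = img_tensorize("http://images.cocodataset.org/val2017/000000039769.jpg")
    return batches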
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
lowerCAmelCase__ : Optional[int] ={
'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__( self , hidden_size=7_6_8 , intermediate_size=3_0_7_2 , num_hidden_layers=1_2 , num_attention_heads=1_2 , num_channels=3 , image_size=2_2_4 , patch_size=1_6 , hidden_act="quick_gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=0.02 , **kwargs , ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__( self , vision_config=None , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=6 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_0_2_4 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=1_0_1 , eos_token_id=1_0_2 , num_image_with_embedding=None , **kwargs , ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
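
# Hedged usage sketch: composing the two configs defined above without touching the
# Hub. Field names follow the signatures; the values are illustrative.
def _demo_git_config():
    vision_config = GitVisionConfig(image_size=224, patch_size=16)
    config = GitConfig(vision_config=vision_config.to_dict(), num_hidden_layers=6)
    return config.to_dict()  # nested dict carrying "vision_config" and "model_type"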
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__( self , sigma_min: float = 0.02 , sigma_max: float = 1_0_0 , s_noise: float = 1.007 , s_churn: float = 8_0 , s_min: float = 0.05 , s_max: float = 5_0 , ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key: random.KeyArray):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, return_dict: bool = True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, sample_prev: jnp.ndarray, derivative: jnp.ndarray, return_dict: bool = True):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
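
# Hedged sketch of the schedule `set_timesteps` builds above: a geometric interpolation
# between sigma_max**2 and sigma_min**2, indexed by the reversed timestep order. The
# helper name and default values are illustrative.
def _demo_karras_schedule(num_inference_steps=5, sigma_min=0.02, sigma_max=100.0):
    timesteps = jnp.arange(0, num_inference_steps)[::-1]
    return jnp.array(
        [sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_inference_steps - 1)) for i in timesteps]
    )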
"""simple docstring"""
import os
from collections.abc import Iterator
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = "." ) -> Iterator[str]:
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(__lowerCAmelCase ):
lowercase_ = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(__lowerCAmelCase )[1] in (".py", ".ipynb"):
yield os.path.join(__lowerCAmelCase , __lowerCAmelCase ).lstrip("""./""" )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Any:
'''simple docstring'''
return F'''{i * " "}*''' if i else "\n##"
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(__lowerCAmelCase ) or old_parts[i] != new_part) and new_part:
print(F'''{md_prefix(__lowerCAmelCase )} {new_part.replace("_" , " " ).title()}''' )
return new_path
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = "." ) -> None:
'''simple docstring'''
lowercase_ = """"""
for filepath in sorted(good_file_paths(__lowerCAmelCase ) ):
lowercase_ , lowercase_ = os.path.split(__lowerCAmelCase )
if filepath != old_path:
lowercase_ = print_path(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = (filepath.count(os.sep ) + 1) if filepath else 0
lowercase_ = F'''{filepath}/{filename}'''.replace(""" """ , """%20""" )
lowercase_ = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(F'''{md_prefix(__lowerCAmelCase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md(".")
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_2 = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)
# fmt: off
        expected_nodes = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
        expected_xpaths = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
        expected_xpaths = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
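
# Hedged usage sketch (outside the test class): the feature extractor maps raw HTML
# to parallel lists of text nodes and their xpaths.
def _demo_markuplm_feature_extraction():
    feature_extractor = MarkupLMFeatureExtractor()
    encoding = feature_extractor("<html><body><h1>Hello</h1></body></html>")
    return encoding.nodes, encoding.xpaths  # [["Hello"]], [["/html/body/h1"]]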
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    "Decorator that skips a test unconditionally"
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
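# Usage sketch: the decorators above wrap plain `unittest` tests, e.g.
#
#     class BigModelTests(unittest.TestCase):   # hypothetical test class
#         @slow
#         @require_multi_gpu
#         def test_distributed_forward(self):
#             ...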
class TempDirTestCase(unittest.TestCase):
    """A TestCase that keeps a single temporary directory for the whole class and empties it between tests."""

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        """Register mocks that are started now and stopped automatically at test cleanup."""
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result
class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 712
|
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Set the element at index i to val and rebuild the affected path."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Return `fn` folded over the inclusive index range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Breadth-first traversal over all nodes of the tree."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 5_0)
snake_case = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
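        # Worked check for the `operator.add` pass: after `arr.update(1, 5)` the
        # backing array is [2, 5, 5, 3, 4], so query_range(3, 4) == 3 + 4 == 7,
        # query_range(2, 2) == 5, and query_range(1, 3) == 5 + 5 + 3 == 13.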
| 404
| 0
|
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply `function` over `iterable` in parallel, via joblib if a backend is configured."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped
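# Worked example of the contiguous split above: with len(iterable) == 10 and
# num_proc == 3, div == 3 and mod == 1, giving slices 0:4, 4:7 and 7:10 of
# lengths [4, 3, 3], which always sum back to len(iterable).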
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    """Configure the backend that `parallel_map` dispatches to (e.g. "spark")."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
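# Usage sketch (assumes `joblibspark` is installed for the "spark" backend;
# `ds` and `tokenize_fn` are hypothetical):
#
#     with parallel_backend("spark"):
#         ds = ds.map(tokenize_fn, num_proc=2)
#
# Without the context manager, `parallel_map` falls back to a local multiprocessing Pool.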
| 284
|
'''simple docstring'''
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover via maximal matching."""
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = set of graph's edges
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and then
    # remove all edges adjacent to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of (from_node, to_node) couples representing all edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
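# A worked example (sketch): for graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]},
# the heuristic may return {0, 1, 2, 4}; the exact cover depends on the arbitrary order
# in which `set.pop` yields edges, but the result is always a vertex cover of size at
# most twice the optimum (each popped edge contributes both endpoints).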
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 284
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A model class for a real vector."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    # precondition
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    # precondition
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    # precondition
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
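# Quick checks for the helpers above (a sketch):
#     axpy(2, Vector([1, 0]), Vector([0, 1]))   # -> (2,1)
#     unit_basis_vector(3, 1)                   # -> (0,1,0)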
class Matrix:
    """A model class for a real matrix."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
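# Usage sketch:
#     v = Vector([1, 2, 3])
#     w = Vector([4, 5, 6])
#     print(v + w)                       # (5,7,9)
#     print(v * w)                       # 32, the dot product
#     m = Matrix([[1, 2], [3, 4]], 2, 2)
#     print(m.determinant())             # 1*4 - 2*3 == -2
#     print(m * Vector([1, 1]))          # (3,7)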
| 700
|
'''simple docstring'''
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extract some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[param_name] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
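# Example invocation (a sketch; the script filename is hypothetical):
#   python extract.py --model_type roberta --model_name roberta-large \
#       --dump_checkpoint serialization_dir/tf_roberta_048131723.pth --vocab_transform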
| 432
| 0
|