| code (string, lengths 82–53.2k) | code_codestyle (int64, 0–721) | style_context (string, lengths 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch

if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # Constraints must be given as a plain (possibly nested) list of ints.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # A set where one sequence is a strict prefix of another is ambiguous
        # and must be rejected by the constructor.
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(stepped is True and completed is False and reset is False)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(stepped is True and completed is False and reset is False)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        self.assertTrue(stepped is True and completed is True and reset is False)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
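# Added usage sketch (not part of the original test file): a DisjunctiveConstraint
# is fulfilled as soon as ANY one of its candidate sequences has been generated.
if is_torch_available():
    _dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for _token in (1, 2, 4):
        _stepped, _completed, _reset = _dc.update(_token)
    assert _completed and _dc.current_seq == [1, 2, 4]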
| 692
|
"""simple docstring"""
import math
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return math.pow(SCREAMING_SNAKE_CASE , 2 ) - a
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
return 2 * x
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
UpperCamelCase : Union[str, Any] = 2.0
while start <= a:
UpperCamelCase : Optional[Any] = math.pow(SCREAMING_SNAKE_CASE , 2 )
return start
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 9999 , SCREAMING_SNAKE_CASE = 0.00_00_00_00_00_00_01 ):
if a < 0:
raise ValueError("""math domain error""" )
UpperCamelCase : Optional[Any] = get_initial_point(SCREAMING_SNAKE_CASE )
for _ in range(SCREAMING_SNAKE_CASE ):
UpperCamelCase : List[str] = value
UpperCamelCase : Tuple = value - fx(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) / fx_derivative(SCREAMING_SNAKE_CASE )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
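# Added worked example: for a == 16 the initial point is 256.0 (2 -> 4 -> 16 -> 256),
# and Newton's iteration 256 -> 128.03125 -> ... converges quadratically to 4.0.
assert abs(square_root_iterative(16) - 4.0) < 1e-9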
| 102
| 0
|
def combination_util(arr, n, r, index, data, i):
    """Recursive helper: fill data[] with one combination at a time and print it."""
    if index == r:
        # Current combination is ready to be printed
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store one combination at a time
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
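# Added note (illustration): the driver above prints all C(5, 3) = 10
# combinations of [10, 20, 30, 40, 50], from "10 20 30" through "30 40 50".
from math import comb

assert comb(5, 3) == 10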
| 561
|
import unittest

from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."])
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
        expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
| 561
| 1
|
def aliquot_sum(input_num):
    """Return the aliquot sum of a positive integer (the sum of its proper divisors)."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
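# Added examples: aliquot_sum(15) == 1 + 3 + 5 == 9, and a perfect number
# such as 6 equals its own aliquot sum (1 + 2 + 3).
assert aliquot_sum(15) == 9
assert aliquot_sum(6) == 6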
| 529
|
import argparse
import collections
import os
import re

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text of `filename` between `start_prompt` and `end_prompt`, plus its location."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


def camel_case_split(identifier):
    """Split a camel-cased identifier into its words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    """Center `text` in a cell of the given `width`."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent


def get_model_table_from_auto_modules():
    """Build the model support table from the content of the auto modules."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table


def check_model_table(overwrite=False):
    """Check the model table in index.md is up to date; rewrite it when `overwrite` is True."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
| 529
| 1
|
"""Open the first Google search result for a query in the default browser."""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))

    print("Googling.....")

    url = f"https://www.google.com/search?q={query}&num=100"

    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )

    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]

    webbrowser.open(link)
| 459
|
"""Binary tree traversals: depth-first, breadth-first, level-wise and zigzag."""
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    """Build the example tree: 1 with children 2 and 3; 2 with children 4 and 5."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Pre-order traversal: root, left subtree, right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Post-order traversal: left subtree, right subtree, root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """In-order traversal: left subtree, root, right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Height of the tree (number of levels)."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first (level-order) traversal using a queue."""
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])

    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Return the nodes of a single level, left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Return the nodes of a single level, right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag traversal: alternate the direction of traversal on every level."""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")

    print(f"Height of Tree: {height(tree)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(tree))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 459
| 1
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]

if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 459
|
import argparse
from collections import defaultdict

import yaml

PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Clean one section of the doc table of content: deduplicate entries and sort them alphabetically."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 459
| 1
|
"""Find a Hamiltonian cycle in a graph (adjacency matrix) via backtracking."""


def valid_connection(graph, next_ver, curr_ind, path):
    # 1. Validate that current and next vertices are connected
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph, path, curr_ind):
    """Recursive helper: try to extend `path` from position `curr_ind`."""
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph, start_index=0):
    """Wrapper: return a Hamiltonian cycle starting at `start_index`, or an empty list."""
    # Initialize path with -1, indicating that we have not visited these vertices yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
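# Added usage example (illustrative): a 5-vertex graph with a known
# Hamiltonian cycle starting and ending at vertex 0.
if __name__ == "__main__":
    graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    assert hamilton_cycle(graph) == [0, 1, 2, 4, 3, 0]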
| 41
|
"""Project Euler: largest 1-9 pandigital 9-digit concatenated product."""
from __future__ import annotations


def is_9_pandigital(n: int) -> bool:
    """Check whether `n` is a 9-digit number containing each of the digits 1-9 exactly once."""
    s = str(n)
    return len(s) == 9 and set(s) == set("123456789")


def solution() -> int | None:
    # A 4-digit base times (1, 2) concatenates as base * 100002.
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    # A 3-digit base times (1, 2, 3) concatenates as base * 1002003.
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate

    return None


if __name__ == "__main__":
    print(f"{solution() = }")
| 41
| 1
|
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Helper function to read an audio file through ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Helper function to read raw microphone data through ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Read microphone audio in (optionally strided) chunks ready for a streaming pipeline."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Read raw bytes from `iterator` and yield chunks of length `chunk_len`, overlapping
    by `stride` on each side. With `stream=True`, partial chunks are yielded as they arrive.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]

    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal function to create the generator of data through ffmpeg."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
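# Added usage sketch (illustrative, matching the functions above, not a
# definitive API reference):
#
#     for item in ffmpeg_microphone_live(sampling_rate=16000, chunk_length_s=5.0):
#         audio = item["raw"]            # np.float32 samples
#         left, right = item["stride"]   # overlap (in samples) with neighbouring chunks
#         if not item["partial"]:
#             pass  # a complete, stride-padded chunk is ready for inference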
| 696
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    """Configuration class for SwitchTransformers models."""

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
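# Added sketch (illustrative, assuming the attribute names restored above):
#
#     config = SwitchTransformersConfig()      # 12 layers, 3 sparse encoder layers
#     assert config.encoder_sparse_step == 4   # every 4th encoder layer is sparse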
| 696
| 1
|
import unittest

from knapsack import knapsack as k


class TestKnapsack(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
| 715
|
def twos_complement(number: int) -> str:
    """Return the two's complement binary representation of a negative integer."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
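# Added worked example: for -5, bin(-5)[3:] == "101" (3 bits); abs(-5) - (1 << 3)
# == -3 gives magnitude bits "11", padded with the sign bit to "1011".
assert twos_complement(-5) == "0b1011"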
| 178
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
snake_case_ : List[str] = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
snake_case_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 595
|
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    """Pairwise squared euclidean distances between rows of `a` and rows of `b`."""
    b = b.T
    aa = np.sum(np.square(a), axis=1)
    bb = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = aa[:, None] - 2 * ab + bb[None, :]
    return d


def color_quantize(x, clusters):
    """Assign each pixel to the index of its nearest color cluster."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


class ImageGPTImageProcessor(BaseImageProcessor):
    """Image processor that resizes, normalizes and color-quantizes images into cluster ids."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Normalize an image's pixel values to the range [-1, 1]."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
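# Added usage sketch (illustrative; "openai/imagegpt-small" is one published
# checkpoint, and the call shape follows the preprocess() method above):
#
#     from transformers import ImageGPTImageProcessor
#     processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
#     input_ids = processor(images=image, return_tensors="np")["input_ids"]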
| 57
| 0
|
def solution() -> int:
    """Return the product of digits d1, d10, ..., d1000000 of Champernowne's constant."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9_999])
        * int(constant[99_999])
        * int(constant[999_999])
    )


if __name__ == "__main__":
    print(solution())
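# Added sanity check: the constant starts 0.123456789101112..., so the 10th
# digit (index 9) is the "1" of "10".
assert "".join(str(i) for i in range(1, 16))[9] == "1"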
| 578
|
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below `n`, using a Sieve of Eratosthenes (0 marks prime)."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
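# Added quick check: the primes below 10 are 2, 3, 5 and 7.
assert solution(10) == 17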
| 578
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class UpperCAmelCase_ :
def __init__( self : Any , A : Any , ):
_UpperCAmelCase : List[Any] = parent
_UpperCAmelCase : Union[str, Any] = 1_3
_UpperCAmelCase : Union[str, Any] = 7
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : int = True
_UpperCAmelCase : List[str] = True
_UpperCAmelCase : Tuple = 9_9
_UpperCAmelCase : Any = 3_2
_UpperCAmelCase : Optional[Any] = 2
_UpperCAmelCase : Union[str, Any] = 4
_UpperCAmelCase : Dict = 3_7
_UpperCAmelCase : Optional[Any] = "gelu"
_UpperCAmelCase : List[str] = 0.1
_UpperCAmelCase : int = 0.1
_UpperCAmelCase : List[str] = 5_1_2
_UpperCAmelCase : Tuple = 1_6
_UpperCAmelCase : Any = 2
_UpperCAmelCase : Optional[int] = 0.02
_UpperCAmelCase : Optional[Any] = 3
_UpperCAmelCase : List[Any] = 4
_UpperCAmelCase : Optional[int] = None
def snake_case_ ( self : Any ):
_UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : List[Any] = None
if self.use_input_mask:
_UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : int = None
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Dict = None
if self.use_labels:
_UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase : Union[str, Any] = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self : Any ):
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) : Any = self.prepare_config_and_inputs()
_UpperCAmelCase : Optional[Any] = True
_UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def snake_case_ ( self : Tuple , A : List[Any] , A : Optional[Any] , A : Optional[Any] , A : Optional[int] , A : Dict , A : List[Any] ):
_UpperCAmelCase : List[str] = TFEsmModel(config=A )
_UpperCAmelCase : int = {"input_ids": input_ids, "attention_mask": input_mask}
_UpperCAmelCase : Optional[int] = model(A )
_UpperCAmelCase : Any = [input_ids, input_mask]
_UpperCAmelCase : List[Any] = model(A )
_UpperCAmelCase : List[Any] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self : Dict , A : List[Any] , A : Optional[Any] , A : List[str] , A : str , A : List[str] , A : str , A : Optional[Any] , A : Tuple , ):
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : str = TFEsmModel(config=A )
_UpperCAmelCase : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
_UpperCAmelCase : Dict = model(A )
_UpperCAmelCase : int = [input_ids, input_mask]
_UpperCAmelCase : Any = model(A , encoder_hidden_states=A )
# Also check the case where encoder outputs are not passed
_UpperCAmelCase : Any = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self : List[Any] , A : Optional[Any] , A : Dict , A : Dict , A : int , A : Dict , A : Union[str, Any] ):
_UpperCAmelCase : Optional[Any] = TFEsmForMaskedLM(config=A )
_UpperCAmelCase : Union[str, Any] = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self : Tuple , A : List[Any] , A : Optional[Any] , A : List[str] , A : List[Any] , A : Any , A : Union[str, Any] ):
_UpperCAmelCase : List[Any] = self.num_labels
_UpperCAmelCase : Optional[int] = TFEsmForTokenClassification(config=A )
_UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
_UpperCAmelCase : Tuple = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))
@slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 289
|
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
_lowerCAmelCase : Dict = "src/transformers"
# Matches is_xxx_available()
_lowerCAmelCase : List[Any] = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_lowerCAmelCase : Tuple = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_lowerCAmelCase : Optional[int] = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_lowerCAmelCase : Tuple = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_lowerCAmelCase : List[str] = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_lowerCAmelCase : Tuple = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_lowerCAmelCase : Dict = re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_lowerCAmelCase : Any = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_lowerCAmelCase : Optional[int] = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_lowerCAmelCase : List[str] = re.compile(r"^\s*try:")
# Catches a line with else:
_lowerCAmelCase : Optional[int] = re.compile(r"^\s*else:")
def __snake_case ( SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> int:
'''simple docstring'''
if _re_test_backend.search(SCREAMING_SNAKE_CASE__ ) is None:
return None
_UpperCAmelCase : str = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE__ )]
backends.sort()
return "_and_".join(SCREAMING_SNAKE_CASE__ )
def __snake_case ( SCREAMING_SNAKE_CASE__ : Any ) -> Dict:
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE__ , "r" , encoding="utf-8" , newline="\n" ) as f:
_UpperCAmelCase : int = f.readlines()
_UpperCAmelCase : Optional[Any] = 0
while line_index < len(SCREAMING_SNAKE_CASE__ ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(SCREAMING_SNAKE_CASE__ ):
return None
# First grab the objects without a specific backend in _import_structure
_UpperCAmelCase : List[Any] = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
_UpperCAmelCase : List[Any] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE__ ):
_UpperCAmelCase : Tuple = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE__ ).groups()[0]
_UpperCAmelCase : int = re.findall("\[([^\]]+)\]" , SCREAMING_SNAKE_CASE__ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
_UpperCAmelCase : Optional[int] = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE__ )
if single_line_import_search is not None:
_UpperCAmelCase : Dict = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(SCREAMING_SNAKE_CASE__ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE__ )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
_UpperCAmelCase : Optional[int] = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
_UpperCAmelCase : Optional[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_UpperCAmelCase : Dict = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_UpperCAmelCase : Union[str, Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
_UpperCAmelCase : Union[str, Any] = lines[line_index]
if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE__ ) is not None:
objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE__ ).groups()[0] )
elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE__ ) is not None:
_UpperCAmelCase : int = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE__ ).groups()[0].split(", " )
_UpperCAmelCase : Dict = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE__ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE__ )
elif _re_between_brackets.search(SCREAMING_SNAKE_CASE__ ) is not None:
_UpperCAmelCase : str = _re_between_brackets.search(SCREAMING_SNAKE_CASE__ ).groups()[0].split(", " )
_UpperCAmelCase : List[Any] = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE__ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE__ )
elif _re_quote_object.search(SCREAMING_SNAKE_CASE__ ) is not None:
objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE__ ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
_UpperCAmelCase : Optional[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_UpperCAmelCase : int = []
while (
line_index < len(SCREAMING_SNAKE_CASE__ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
_UpperCAmelCase : List[Any] = lines[line_index]
_UpperCAmelCase : Optional[Any] = _re_import.search(SCREAMING_SNAKE_CASE__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
_UpperCAmelCase : Union[str, Any] = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(SCREAMING_SNAKE_CASE__ ):
# If the line is an if is_backend_available, we grab all objects associated.
_UpperCAmelCase : Any = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_UpperCAmelCase : List[str] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_UpperCAmelCase : List[str] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
_UpperCAmelCase : Any = lines[line_index]
_UpperCAmelCase : Optional[int] = _re_import.search(SCREAMING_SNAKE_CASE__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
_UpperCAmelCase : Union[str, Any] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
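# Illustrative example of the error output (the object names below are hypothetical):
#
#   analyze_results({"none": ["FooModel"]}, {"none": ["FooModel", "BarModel"]})
#   # -> ["Differences for base imports:",
#   #     "  BarModel in TYPE_HINT but not in _import_structure."]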
def __snake_case ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Tuple = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE__ ):
if "__init__.py" in files:
_UpperCAmelCase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ , "__init__.py" )
_UpperCAmelCase : Union[str, Any] = parse_init(SCREAMING_SNAKE_CASE__ )
if objects is not None:
_UpperCAmelCase : int = analyze_results(*SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
_UpperCAmelCase : Dict = f'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append("\n".join(SCREAMING_SNAKE_CASE__ ) )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
raise ValueError("\n\n".join(SCREAMING_SNAKE_CASE__ ) )
def __snake_case ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = []
for path, directories, files in os.walk(SCREAMING_SNAKE_CASE__ ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(SCREAMING_SNAKE_CASE__ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(SCREAMING_SNAKE_CASE__ ) / folder).glob("*.py" ) ) ) == 0:
continue
_UpperCAmelCase : List[Any] = str((Path(SCREAMING_SNAKE_CASE__ ) / folder).relative_to(SCREAMING_SNAKE_CASE__ ) )
_UpperCAmelCase : Optional[int] = short_path.replace(os.path.sep , "." )
submodules.append(SCREAMING_SNAKE_CASE__ )
for fname in files:
if fname == "__init__.py":
continue
_UpperCAmelCase : Tuple = str((Path(SCREAMING_SNAKE_CASE__ ) / fname).relative_to(SCREAMING_SNAKE_CASE__ ) )
_UpperCAmelCase : Dict = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(SCREAMING_SNAKE_CASE__ )
return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def __snake_case ( ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = importlib.util.spec_from_file_location(
"transformers" , os.path.join(SCREAMING_SNAKE_CASE__ , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
_UpperCAmelCase : Optional[int] = spec.loader.load_module()
_UpperCAmelCase : str = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(SCREAMING_SNAKE_CASE__ ) > 0:
_UpperCAmelCase : Optional[Any] = "\n".join(f'- {module}' for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
f'{list_of_modules}\n'
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 289
| 1
|
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 703
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs
def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 205
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
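# Minimal usage sketch (illustrative values): the attribute_map above lets the config
# be read through the canonical Transformers names, e.g. `hidden_size` resolves to `n_embd`.
#
#   config = GPTBigCodeConfig(n_embd=256, n_layer=4, n_head=4)
#   assert config.hidden_size == 256
#   assert config.num_hidden_layers == 4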
| 77
|
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attribute name expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def lowercase () -> Optional[int]:
if os.name == "nt":
SCREAMING_SNAKE_CASE = CursorInfo()
SCREAMING_SNAKE_CASE = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(SCREAMING_SNAKE_CASE_ , ctypes.byref(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(SCREAMING_SNAKE_CASE_ , ctypes.byref(SCREAMING_SNAKE_CASE_ ) )
elif os.name == "posix":
sys.stdout.write('\033[?25l' )
sys.stdout.flush()
def lowercase () -> int:
if os.name == "nt":
SCREAMING_SNAKE_CASE = CursorInfo()
SCREAMING_SNAKE_CASE = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(SCREAMING_SNAKE_CASE_ , ctypes.byref(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(SCREAMING_SNAKE_CASE_ , ctypes.byref(SCREAMING_SNAKE_CASE_ ) )
elif os.name == "posix":
sys.stdout.write('\033[?25h' )
sys.stdout.flush()
@contextmanager
def lowercase () -> Dict:
try:
hide_cursor()
yield
finally:
show_cursor()
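# Usage sketch for the context manager above (the body is illustrative):
#
#   with hide_cursor_context():
#       print("rendering a menu without a blinking cursor...")
#   # the cursor is restored even if the body raises, thanks to try/finally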
| 247
| 0
|
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 245
|
'''simple docstring'''
from collections.abc import Callable
def UpperCamelCase ( a , a , a ) -> float:
'''simple docstring'''
__magic_name__ = a
__magic_name__ = b
if function(a ) == 0: # one of the a or b is a root for the function
return a
elif function(a ) == 0:
return b
elif (
function(a ) * function(a ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError('''could not find root in given interval.''' )
else:
__magic_name__ = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(a ) == 0:
return mid
elif function(a ) * function(a ) < 0:
__magic_name__ = mid
else:
__magic_name__ = mid
__magic_name__ = start + (end - start) / 2.0
return mid
def UpperCamelCase ( a ) -> float:
'''simple docstring'''
return x**3 - 2 * x - 5
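# Sanity check (approximate value): the only real root of x**3 - 2x - 5 is near
# 2.0945515, so bisection(f, 1, 1000) should return roughly that value within the
# 1e-7 stopping tolerance used above.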
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
| 245
| 1
|
from __future__ import annotations
import math
def A__ ( __A : int ) ->bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
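# Quick examples of the 6k +/- 1 primality test above:
#
#   is_prime(97)  # -> True
#   is_prime(91)  # -> False (7 * 13)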
def A__ ( __A : int ) ->list[int]:
__A =str(__A )
__A =[n]
for i in range(1 , len(__A ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
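# Example: every left- and right-truncation of 3797 (a classic truncatable prime):
#
#   list_truncated_nums(3797)  # -> [3797, 797, 379, 97, 37, 7, 3]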
def A__ ( __A : int ) ->bool:
if len(str(__A ) ) > 3:
if not is_prime(int(str(__A )[-3:] ) ) or not is_prime(int(str(__A )[:3] ) ):
return False
return True
def A__ ( __A : int = 11 ) ->list[int]:
__A =[]
__A =13
while len(__A ) != count:
if validate(__A ):
__A =list_truncated_nums(__A )
if all(is_prime(__A ) for i in list_nums ):
list_truncated_primes.append(__A )
num += 2
return list_truncated_primes
def A__ ( ) ->int:
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(11)) = }""")
| 184
|
def A__ ( __A : str , __A : str ) ->str:
if not (isinstance(__A , __A ) and isinstance(__A , __A )):
raise ValueError('''longest_common_substring() takes two strings for inputs''' )
__A =len(__A )
__A =len(__A )
__A =[[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
__A =0
__A =0
for i in range(1 , texta_length + 1 ):
for j in range(1 , texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
__A =1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
__A =i
__A =dp[i][j]
return texta[ans_index - ans_length : ans_index]
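# Usage example (unambiguous case): "abcd" is the unique longest common substring here.
#
#   longest_common_substring("abcdxyz", "xyzabcd")  # -> "abcd"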
if __name__ == "__main__":
import doctest
doctest.testmod()
| 184
| 1
|
"""simple docstring"""
def UpperCamelCase_ ( lowerCamelCase : Any ) -> List[str]:
"""simple docstring"""
__magic_name__ : Union[str, Any] = len(lowerCamelCase )
__magic_name__ : Dict = sum(lowerCamelCase )
__magic_name__ : List[Any] = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__magic_name__ : int = True
for i in range(1 , s + 1 ):
__magic_name__ : Any = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__magic_name__ : str = dp[i][j - 1]
if arr[i - 1] <= j:
__magic_name__ : int = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__magic_name__ : Tuple = s - 2 * j
break
return diff
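# Worked example: [1, 6, 11, 5] sums to 23; the best split is {1, 5, 6} vs {11},
# so the minimum difference is 12 - 11 = 1.
#
#   find_min([1, 6, 11, 5])  # -> 1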
| 147
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
A = TypeVar("""T""")
A = TypeVar("""U""")
class DoubleLinkedListNode(Generic[T, U]):
    """
    Double Linked List Node built specifically for LRU Cache
    """

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """
    Double Linked List built specifically for LRU Cache
    """

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head
def __repr__( self : str ) -> str:
'''simple docstring'''
__magic_name__ : List[str] = ['''DoubleLinkedList''']
__magic_name__ : Optional[Any] = self.head
while node.next is not None:
rep.append(str(snake_case ) )
__magic_name__ : Any = node.next
rep.append(str(self.rear ) )
return ",\n ".join(snake_case )
def _UpperCAmelCase ( self : List[str] , snake_case : DoubleLinkedListNode[T, U] ) -> None:
'''simple docstring'''
__magic_name__ : Tuple = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
__magic_name__ : Dict = node
__magic_name__ : Optional[int] = previous
__magic_name__ : Tuple = node
__magic_name__ : Optional[int] = self.rear
def _UpperCAmelCase ( self : str , snake_case : DoubleLinkedListNode[T, U] ) -> DoubleLinkedListNode[T, U] | None:
'''simple docstring'''
if node.prev is None or node.next is None:
return None
__magic_name__ : str = node.next
__magic_name__ : Dict = node.prev
__magic_name__ : Any = None
__magic_name__ : Dict = None
return node
class LRUCache(Generic[T, U]):
    """
    LRU Cache to store a given capacity of data
    """

    # class variable mapping decorated functions to their LRUCache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None
def _UpperCAmelCase ( self : str , snake_case : T , snake_case : U ) -> None:
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
__magic_name__ : Optional[Any] = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(snake_case ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
__magic_name__ : List[str] = DoubleLinkedListNode(snake_case , snake_case )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
__magic_name__ : Dict = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
__magic_name__ : Any = value
self.list.add(snake_case )
@classmethod
def _UpperCAmelCase ( cls : Tuple , snake_case : int = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
'''simple docstring'''
def cache_decorator_inner(snake_case : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*snake_case : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
__magic_name__ : Any = LRUCache(snake_case )
__magic_name__ : Optional[Any] = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
__magic_name__ : Any = func(*snake_case )
cls.decorator_function_to_instance_map[func].put(args[0] , snake_case )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(snake_case , '''cache_info''' , snake_case ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
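# Usage sketch (illustrative): memoize a recursive function with the decorator above.
#
#   @LRUCache.decorator(100)
#   def fib(num):
#       if num in (1, 2):
#           return 1
#       return fib(num - 1) + fib(num - 2)
#
#   fib(25)            # -> 75025
#   fib.cache_info()   # -> CacheInfo(hits=..., misses=..., capacity=100, current size=...)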
if __name__ == "__main__":
import doctest
doctest.testmod()
| 147
| 1
|
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : Optional[Any]=2_81_23 ) -> str:
'''simple docstring'''
UpperCAmelCase_ = [1] * (limit + 1)
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
UpperCAmelCase_ = set()
UpperCAmelCase_ = 0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
abundants.add(snake_case_ )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
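# For the default limit of 28123 (the proven upper bound for this problem),
# solution() should evaluate to 4179871 — the answer to Project Euler problem 23.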
if __name__ == "__main__":
print(solution())
| 78
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np"
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np"
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np"
        )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np"
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 439
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
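# Illustrative effect of the lazy module above (example import; requires the
# relevant backend to be installed):
#
#   from transformers.models.roberta_prelayernorm import RobertaPreLayerNormModel
#
# The submodule is only actually imported at first attribute access, keeping
# `import transformers` fast even with many optional backends.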
| 708
|
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def SCREAMING_SNAKE_CASE( __UpperCamelCase ) -> Dict:
if "cls_token" in name:
a__ : Union[str, Any] = name.replace("cls_token" , "vit.embeddings.cls_token" )
if "mask_token" in name:
a__ : Optional[Any] = name.replace("mask_token" , "decoder.mask_token" )
if "decoder_pos_embed" in name:
a__ : List[Any] = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
if "pos_embed" in name and "decoder" not in name:
a__ : Dict = name.replace("pos_embed" , "vit.embeddings.position_embeddings" )
if "patch_embed.proj" in name:
a__ : Optional[Any] = name.replace("patch_embed.proj" , "vit.embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
a__ : Optional[int] = name.replace("patch_embed.norm" , "vit.embeddings.norm" )
if "decoder_blocks" in name:
a__ : Dict = name.replace("decoder_blocks" , "decoder.decoder_layers" )
if "blocks" in name:
a__ : Any = name.replace("blocks" , "vit.encoder.layer" )
if "attn.proj" in name:
a__ : Union[str, Any] = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
a__ : int = name.replace("attn" , "attention.self" )
if "norm1" in name:
a__ : List[Any] = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
a__ : Union[str, Any] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
a__ : Optional[int] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
a__ : int = name.replace("mlp.fc2" , "output.dense" )
if "decoder_embed" in name:
a__ : Union[str, Any] = name.replace("decoder_embed" , "decoder.decoder_embed" )
if "decoder_norm" in name:
a__ : List[Any] = name.replace("decoder_norm" , "decoder.decoder_norm" )
if "decoder_pred" in name:
a__ : Tuple = name.replace("decoder_pred" , "decoder.decoder_pred" )
if "norm.weight" in name and "decoder" not in name:
a__ : Dict = name.replace("norm.weight" , "vit.layernorm.weight" )
if "norm.bias" in name and "decoder" not in name:
a__ : List[str] = name.replace("norm.bias" , "vit.layernorm.bias" )
return name
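# Example mapping produced by rename_key (the checkpoint key below is illustrative):
#
#   rename_key("blocks.0.attn.proj.weight")
#   # -> "vit.encoder.layer.0.attention.output.dense.weight"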
def SCREAMING_SNAKE_CASE( __UpperCamelCase , __UpperCamelCase ) -> Any:
for key in orig_state_dict.copy().keys():
a__ : Optional[Any] = orig_state_dict.pop(__UpperCamelCase )
if "qkv" in key:
a__ : List[str] = key.split("." )
a__ : Optional[Any] = int(key_split[1] )
if "decoder_blocks" in key:
a__ : List[str] = config.decoder_hidden_size
a__ : Tuple = "decoder.decoder_layers."
if "weight" in key:
a__ : List[str] = val[:dim, :]
a__ : str = val[dim : dim * 2, :]
a__ : Optional[Any] = val[-dim:, :]
elif "bias" in key:
a__ : Dict = val[:dim]
a__ : Dict = val[dim : dim * 2]
a__ : List[Any] = val[-dim:]
else:
a__ : Tuple = config.hidden_size
a__ : Any = "vit.encoder.layer."
if "weight" in key:
a__ : str = val[:dim, :]
a__ : Optional[int] = val[dim : dim * 2, :]
a__ : str = val[-dim:, :]
elif "bias" in key:
a__ : Any = val[:dim]
a__ : Dict = val[dim : dim * 2]
a__ : Tuple = val[-dim:]
else:
a__ : List[str] = val
return orig_state_dict
def SCREAMING_SNAKE_CASE( __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
a__ : Dict = ViTMAEConfig()
if "large" in checkpoint_url:
a__ : Optional[Any] = 10_24
a__ : Optional[int] = 40_96
a__ : Optional[Any] = 24
a__ : str = 16
elif "huge" in checkpoint_url:
a__ : Dict = 14
a__ : Union[str, Any] = 12_80
a__ : str = 51_20
a__ : Dict = 32
a__ : Optional[Any] = 16
a__ : str = ViTMAEForPreTraining(__UpperCamelCase )
a__ : Tuple = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location="cpu" )["model"]
a__ : Dict = ViTMAEImageProcessor(size=config.image_size )
a__ : str = convert_state_dict(__UpperCamelCase , __UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
a__ : Optional[Any] = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
a__ : List[str] = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
a__ : List[Any] = ViTMAEImageProcessor(size=config.image_size )
a__ : Dict = image_processor(images=__UpperCamelCase , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
a__ : List[Any] = model(**__UpperCamelCase )
a__ : Optional[int] = outputs.logits
if "large" in checkpoint_url:
a__ : Tuple = torch.tensor(
[[-0.7_3_0_9, -0.7_1_2_8, -1.0_1_6_9], [-1.0_1_6_1, -0.9_0_5_8, -1.1_8_7_8], [-1.0_4_7_8, -0.9_4_1_1, -1.1_9_1_1]] )
elif "huge" in checkpoint_url:
a__ : Optional[int] = torch.tensor(
[[-1.1_5_9_9, -0.9_1_9_9, -1.2_2_2_1], [-1.1_9_5_2, -0.9_2_6_9, -1.2_3_0_7], [-1.2_1_4_3, -0.9_3_3_7, -1.2_2_6_2]] )
else:
a__ : Union[str, Any] = torch.tensor(
[[-0.9_1_9_2, -0.8_4_8_1, -1.1_2_5_9], [-1.1_3_4_9, -1.0_0_3_4, -1.2_5_9_9], [-1.1_7_5_7, -1.0_4_2_9, -1.2_7_2_6]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , __UpperCamelCase , atol=1e-4 )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(__UpperCamelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 207
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def __lowercase (_lowercase, _lowercase=False ) -> List[str]:
"""simple docstring"""
__lowerCamelCase : Tuple = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
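# Shape sketch for the split above (assuming ViT-Base, hidden_size=768): timm stores
# the fused projection as blocks.{i}.attn.qkv.weight of shape (3 * 768, 768) = (2304, 768);
# the three consecutive 768-row chunks become the query, key and value weights
# respectively, and the (2304,) bias is split the same way.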
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    # standard COCO verification image (two cats on a couch)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
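# Example invocation (the script filename and output path are illustrative; the
# default vit_name comes from the parser above):
#   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 --pytorch_dump_folder_path ./vit-base-16-224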
| 150
|
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path,
    question_encoder_name_or_path,
    dest_dir,
    config_name_or_path=None,
    generator_tokenizer_name_or_path=None,
    question_encoder_tokenizer_name_or_path=None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
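# Example invocation (the script filename and model identifiers are illustrative;
# any compatible generator/question-encoder pair should work):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-ckpt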
| 150
| 1
|
def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    # formula for the sum of an arithmetic series: n/2 * (2a + (n - 1)d)
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
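# Worked example of the formula above: sum_of_series(1, 1, 10)
#   = 10 / 2 * (2 * 1 + (10 - 1) * 1) = 5 * 11 = 55.0, i.e. 1 + 2 + ... + 10.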
| 588
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)
            result.append(result_img)
        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
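# Minimal shape sketch for the checker above (tensors are illustrative, not real
# concept embeddings): image_embeds has shape (batch, projection_dim) and
# concept_embeds (17, projection_dim), so
#   cosine_distance(image_embeds, concept_embeds) -> (batch, 17)
# which both forward passes then threshold against the per-concept weight vectors.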
| 588
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , _UpperCAmelCase , _UpperCAmelCase="<pad>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<mask_2>" , _UpperCAmelCase="<mask_1>" , _UpperCAmelCase=None , _UpperCAmelCase=103 , _UpperCAmelCase = None , **_UpperCAmelCase , ):
lowercase__: str = offset
if additional_special_tokens is not None:
if not isinstance(A__ , A__ ):
raise TypeError(
F"""additional_special_tokens should be of type {type(A__ )}, but is"""
F""" {type(A__ )}""" )
lowercase__: Optional[int] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"""<unk_{i}>""" for i in range(len(A__ ) , self.offset - 1 )
]
if len(set(A__ ) ) != len(A__ ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
lowercase__: List[str] = additional_special_tokens_extended
else:
lowercase__: Optional[int] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 , self.offset )]
lowercase__: Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=A__ , unk_token=A__ , mask_token=A__ , pad_token=A__ , mask_token_sent=A__ , offset=A__ , additional_special_tokens=A__ , sp_model_kwargs=self.sp_model_kwargs , **A__ , )
lowercase__: Any = mask_token_sent
lowercase__: int = vocab_file
lowercase__: List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A__ )
# add special tokens to encoder dict
lowercase__: Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
lowercase__: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
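# Illustration of the id layout built in __init__ (offset defaults to 103):
# ids 0/1 are <pad>/</s>, ids 2/3 the two mask tokens, the <unk_x> fillers occupy
# the rest of the first `offset` ids, and a SentencePiece piece with id p maps to
# p + offset in _convert_token_to_id above.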
| 586
|
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def UpperCamelCase (lowercase_: "pyspark.sql.DataFrame" , lowercase_: List[int] , ) -> Dict:
import pyspark
def generate_fn():
A__ : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
A__ : List[Any] = df_with_partition_id.select("""*""" ).where(f"""part_id = {partition_id}""" ).drop("""part_id""" )
A__ : Optional[int] = partition_df.collect()
A__ : Any = 0
for row in rows:
yield f"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
def __iter__( self ):
yield from self.generate_examples_fn()
    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
@property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs)
    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath, file_format, max_shard_size):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def __A ( self , A__ , A__ = "arrow" , A__ = None , A__ = None , **A__ , ):
self._validate_cache_dir()
A__ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(A__ )
A__ : Any = not is_remote_filesystem(self._fs )
A__ : Optional[int] = os.path.join if is_local else posixpath.join
A__ : Dict = """-TTTTT-SSSSS-of-NNNNN"""
A__ : Any = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
A__ : Any = path_join(self._output_dir , A__ )
A__ : Tuple = 0
A__ : str = 0
A__ : List[Any] = 0
A__ : List[Any] = []
A__ : Optional[Any] = []
for task_id, content in self._prepare_split_single(A__ , A__ , A__ ):
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) : List[Any] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(A__ )
A__ : Optional[int] = total_num_examples
A__ : Union[str, Any] = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
A__ : int = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
A__ : Dict = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
A__ , A__ , A__ , ):
rename(
A__ , fpath.replace("""SSSSS""" , F"""{shard_id:05d}""" ).replace("""TTTTT""" , F"""{task_id:05d}""" ) , fpath.replace("""TTTTT-SSSSS""" , F"""{global_shard_id:05d}""" ).replace("""NNNNN""" , F"""{total_shards:05d}""" ) , )
A__ : List[Any] = []
A__ : Union[str, Any] = 0
for i in range(len(A__ ) ):
A__ , A__ : Optional[int] = task_id_and_num_shards[i]
for shard_id in range(A__ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(A__ , len(A__ ) ).map(lambda A__ : _rename_shard(*A__ ) ).collect()
else:
# don't use any pattern
A__ : List[Any] = 0
A__ : List[str] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , F"""{shard_id:05d}""" ).replace("""TTTTT""" , F"""{task_id:05d}""" ) , fpath.replace(A__ , """""" ) , )
    def _get_examples_iterable_for_split(self, split_generator):
        return SparkExamplesIterable(self.df)
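# Hypothetical usage sketch (requires an active SparkSession; data is illustrative):
#   import datasets
#   df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#   ds = datasets.Dataset.from_spark(df)  # drives the Spark builder defined above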
| 456
| 0
|
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    # extended Euclidean algorithm: returns x such that (a * x) % m == 1
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
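if __name__ == "__main__":
    # Worked example: 7 * 15 = 105 = 4 * 26 + 1, so 15 is the inverse of 7 mod 26.
    print(find_mod_inverse(7, 26))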
| 712
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
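# Sequence layout produced by the two methods above (XLNet appends its special
# tokens rather than prepending them):
#   one sequence:  A <sep> <cls>            token type ids: 0 ... 0 2
#   two sequences: A <sep> B <sep> <cls>    token type ids: 0 ... 0 1 ... 1 2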
| 639
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
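# With the lazy module installed in sys.modules, heavyweight framework imports only
# happen on first attribute access, e.g. (illustrative):
#   from transformers import XLMRobertaConfig  # loads configuration_xlm_roberta only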
| 4
|
import jax.numpy as jnp
from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"
def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
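# Worked example for shift_tokens_right (values illustrative):
#   input_ids = [[5, 6, 7]], pad_token_id = 0, decoder_start_token_id = 0
#   -> [[0, 5, 6]]: everything moves one position right, the start token fills
#   position 0, and any label padding of -100 is replaced by pad_token_id.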
| 141
| 0
|
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())
    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])
        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])
    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
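# Typical invocation, matching the header comment (run from the repository root):
#   python utils/check_config_attributes.py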
| 716
|
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2]):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
return FocalNetConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def _a ( self ) -> str:
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def _a ( self ) -> Optional[int]:
pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim])

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim])
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        # assertTrue with two arguments only checks the first one; assertEqual is what was meant here
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    # BackboneTesterMixin is the standard base for backbone tests and is assumed to be
    # imported at the top of the file (the mangled original base name is unrecoverable).
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
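# Quick sanity check of the shape arithmetic the assertions above rely on: each of the
# len(depths) - 1 downsampling stages quarters the token count and doubles the channel
# dimension. With illustrative values (not tied to any particular checkpoint):
#
#     image_size, patch_size, embed_dim, depths = 224, 4, 96, [2, 2, 6, 2]
#     tokens = (image_size // patch_size) ** 2             # 3136 patches after the stem
#     expected_seq_len = tokens // 4 ** (len(depths) - 1)  # 49
#     expected_dim = embed_dim * 2 ** (len(depths) - 1)    # 768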
| 539
| 0
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)
    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)
    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and\n"
            " seventy-five.\n\nSpiritual revelations were conceded to England at that\n"
            " favoured period, as at this."
        )
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns empty collections of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])
    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)
    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
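# A minimal, illustrative restatement of one helper exercised above (the real
# implementation lives in utils_summarization; this sketch only makes the expected
# semantics explicit): truncate_or_pad clips a sequence to block_size or right-pads
# it with the given pad token.
def _truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    return (list(sequence) + [pad_token_id] * block_size)[:block_size]


assert _truncate_or_pad_sketch([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert _truncate_or_pad_sketch(list(range(1, 14)), 10, 0) == list(range(1, 11))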
| 318
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
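# Why the fast test above re-seeds before its second pipeline call: a torch generator
# is stateful, so two draws after a single seed differ, while re-seeding reproduces
# the first draw exactly. A tiny demonstration, independent of diffusers:
_g = torch.manual_seed(0)
_a = torch.randn(2, generator=_g)
_g = torch.manual_seed(0)
_b = torch.randn(2, generator=_g)
assert torch.equal(_a, _b)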
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_sde_ve_pipeline(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)

        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 318
| 1
|
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed run of the scheduled (daily) CI."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) matches the parameter name used by `get_artifacts_links`
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the contents of the downloaded artifacts, keyed by artifact and file name."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
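# Hypothetical usage sketch; the artifact name and output directory below are
# placeholders, not values taken from the actual CI configuration:
#
#     token = os.environ.get("GITHUB_TOKEN")
#     reports = get_last_daily_ci_reports(
#         artifact_names=["example_test_reports"], output_dir="ci_artifacts", token=token
#     )
#     for artifact, files in reports.items():
#         print(artifact, sorted(files))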
| 702
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    # attribute names follow the usual PipelineTesterMixin conventions; the mangled
    # originals are unrecoverable
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)

        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        # create mask: all ones (keep) with a cleared region to inpaint; the exact
        # slice was mangled in the source, so [:32, :32] is a reconstruction
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        # all ones (keep) with a cleared band to inpaint; the exact slice was mangled
        # in the source, so [:250, 250:-250] is a reconstruction
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt, image=init_image, mask_image=mask, image_embeds=image_emb,
            negative_image_embeds=zero_image_emb, generator=generator,
            num_inference_steps=100, height=768, width=768, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
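# Mask convention used in these tests (a small, self-contained illustration, not
# diffusers API): 1.0 marks pixels to keep and 0.0 marks the region the pipeline
# should repaint.
#
#     mask = np.ones((8, 8), dtype=np.float32)
#     mask[:4, :4] = 0          # top-left quadrant gets inpainted
#     assert mask.sum() == 48   # 64 pixels minus the 16 cleared ones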
| 569
| 0
|
"""simple docstring"""
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : str , a_ : Tuple )-> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = val
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Optional[Any] = None
def a ( self : int , a_ : Union[str, Any] )-> Tuple:
"""simple docstring"""
if self.val:
if val < self.val:
if self.left is None:
UpperCAmelCase_ : Optional[Any] = Node(a_ )
else:
self.left.insert(a_ )
elif val > self.val:
if self.right is None:
UpperCAmelCase_ : Optional[Any] = Node(a_ )
else:
self.right.insert(a_ )
else:
UpperCAmelCase_ : List[Any] = val
def A_ ( lowercase , lowercase ) -> Dict:
"""simple docstring"""
if root:
inorder(root.left , lowercase )
res.append(root.val )
inorder(root.right , lowercase )
def A_ ( lowercase ) -> Tuple:
"""simple docstring"""
if len(lowercase ) == 0:
return arr
UpperCAmelCase_ : int = Node(arr[0] )
for i in range(1 , len(lowercase ) ):
root.insert(arr[i] )
# Traverse BST in order.
UpperCAmelCase_ : List[str] = []
inorder(lowercase , lowercase )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
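# Caveat worth noting: insert() treats an equal value as an update of the current
# node, so duplicates are silently dropped and tree_sort deduplicates its input:
#
#     tree_sort([3, 1, 3, 2])  ->  [1, 2, 3]
#
# Worst-case behaviour is O(n^2) on already-sorted input (the BST degenerates into
# a linked list); on random input the expected cost is O(n log n).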
| 470
|
"""simple docstring"""
def A_ ( lowercase ) -> None:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = generate_pascal_triangle(lowercase )
for row_idx in range(lowercase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=""" """ )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=""" """ )
else:
print(triangle[row_idx][col_idx] , end="""""" )
print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle as a list of rows, one row at a time."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Build each row from the previous one, computing only the first half and
    mirroring it, since every row of Pascal's triangle is symmetric."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result
def benchmark() -> None:
    """Benchmark both triangle generators over a range of sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
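# Cross-check against the closed form (independent of the code above): entry k of
# row n is the binomial coefficient C(n, k), so both generators should agree with
# math.comb.
#
#     import math
#     rows = generate_pascal_triangle_optimized(8)
#     assert all(rows[n][k] == math.comb(n, k) for n in range(8) for k in range(n + 1))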
| 470
| 1
|
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
A = ["""small""", """medium""", """large"""]
A = """lm_head.decoder.weight"""
A = """lm_head.weight"""
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> str:
"""simple docstring"""
__UpperCAmelCase : Dict = torch.load(UpperCamelCase )
__UpperCAmelCase : Optional[Any] = d.pop(UpperCamelCase )
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
torch.save(UpperCamelCase , os.path.join(UpperCamelCase , UpperCamelCase ) )
if __name__ == "__main__":
A = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
A = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
A = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
A = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
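# The conversion is just a key rename inside the state dict; the same idea on a toy
# dict (the key names mirror the constants above, nothing model-specific happens):
#
#     d = {"lm_head.decoder.weight": "tensor-here", "other": 1}
#     d["lm_head.weight"] = d.pop("lm_head.decoder.weight")
#     assert "lm_head.decoder.weight" not in d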
| 487
|
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase = False ) -> bool:
"""simple docstring"""
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
__UpperCAmelCase : List[str] = [
2047,
137_3653,
2532_6001,
32_1503_1751,
2_1523_0289_8747,
3_4747_4966_0383,
341_5500_7172_8321,
1,
382_5123_0565_4641_3051,
1,
1,
3186_6585_7834_0311_5116_7461,
3_3170_4406_4679_8873_8596_1981,
]
__UpperCAmelCase : Tuple = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(UpperCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
__UpperCAmelCase : Tuple = primes[:idx]
break
__UpperCAmelCase , __UpperCAmelCase : List[str] = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
__UpperCAmelCase : Optional[int] = False
for r in range(UpperCamelCase ):
__UpperCAmelCase : Dict = pow(UpperCamelCase , d * 2**r , UpperCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
__UpperCAmelCase : Optional[int] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
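# The decomposition step above in isolation: write n - 1 = d * 2**s with d odd.
# `_decompose` is a throwaway helper for illustration, not part of the algorithm's API.
def _decompose(n: int) -> tuple[int, int]:
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    return d, s


# For n = 221: 220 = 55 * 2**2, so d = 55 and s = 2.
assert _decompose(221) == (55, 2)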
def test_miller_rabin() -> None:
    """Exercise the deterministic bounds listed above."""
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 487
| 1
|
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def UpperCamelCase ( lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Dict ) -> Optional[Any]:
'''simple docstring'''
lowercase ={
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
lowercase ={
'''wmt16-en-de-dist-12-1''': [2_8.3, 2_7.5_2],
'''wmt16-en-de-dist-6-1''': [2_7.4, 2_7.1_1],
'''wmt16-en-de-12-1''': [2_6.9, 2_5.7_5],
}
lowercase =f'{src_lang}-{tgt_lang}'
lowercase =f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
model_card_dir.mkdir(parents=lowercase_ , exist_ok=lowercase_ )
lowercase =os.path.join(lowercase_ , '''README.md''' )
print(f'Generating {path}' )
with open(lowercase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(lowercase_ )
# make sure we are under the root of the project
_UpperCAmelCase : Optional[Any] = Path(__file__).resolve().parent.parent.parent
_UpperCAmelCase : Tuple = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
_UpperCAmelCase : str = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 72
|
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum, per resource, what every process currently holds."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still free: the claim vector minus everything allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per process: maximum claim minus what is already allocated."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        """Run the safety algorithm: execute any process whose need fits in the
        currently available resources, freeing its allocation when it finishes."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
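# An illustrative way to drive the class with the module-level tables above
# (`describe=True` is just a truthy keyword that triggers the __pretty_data dump
# via main's **kwargs hook):
#
#     BankersAlgorithm(
#         test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#     ).main(describe=True)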
| 72
| 1
|
"""simple docstring"""
import math
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , snake_case_=0 ) -> List[Any]: # a graph with Node 0,1,...,N-1
__lowerCAmelCase = n
__lowerCAmelCase = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # adjacency matrix for weight
__lowerCAmelCase = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # dp[i][j] stores minimum distance from i to j
def A__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
__lowerCAmelCase = w
def A__ ( self ) -> Union[str, Any]:
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
__lowerCAmelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def A__ ( self , snake_case_ , snake_case_ ) -> Optional[Any]:
return self.dp[u][v]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
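# Note a subtlety in the implementation above: dp[i][i] starts at math.inf and is
# never forced to 0, so show_min(i, i) only becomes finite if some cycle through i
# exists. If zero self-distances are wanted, initialise the diagonal explicitly:
#
#     for i in range(graph.n):
#         graph.dp[i][i] = 0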
| 718
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
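# The lazy-module pattern in miniature (a sketch, not the transformers implementation):
# the package replaces itself in sys.modules with a proxy whose __getattr__ imports
# the real submodule only on first attribute access.
#
#     class _LazyModuleSketch(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, names in self._import_structure.items():
#                 if attr in names:
#                     module = importlib.import_module(f".{submodule}", self.__name__)
#                     return getattr(module, attr)
#             raise AttributeError(attr)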
| 573
| 0
|
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" ,(
pytest.param(_add_items ,id="add items" ),
pytest.param(_overwrite_items ,id="overwrite items" ),
pytest.param(_delete_items ,id="delete items" ),
pytest.param(_access_absent_items ,id="access absent items" ),
pytest.param(_add_with_resize_up ,id="add with resize up" ),
pytest.param(_add_with_resize_down ,id="add with resize down" ),
) ,)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exception = _run_operation(my, fun, *args)
        py_res, py_exception = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_matching_python_dict():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
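# How an operation tuple flows through _run_operation (illustration only): each entry
# pairs an operator-module function with its arguments, so _set("k", "v") becomes
# (setitem, "k", "v") and _run_operation(d, setitem, "k", "v") performs d["k"] = "v",
# returning (result, None) on success or (None, exception) on failure. Running the
# same tuple against both HashMap and dict is what makes the parametrised
# differential test above work.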
| 95
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    # the two mangled `42` annotations stood for the usual output fields
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline

if is_transformers_available() and is_flax_available():
    import flax

    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        # the two mangled `42` annotations stood for the usual flax output fields
        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 328
| 0
|
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['sentence_order_label'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict

    def setUp( self ):
        '''simple docstring'''
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AlbertConfig , hidden_size=37 )

    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_pretraining( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )

    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_model_various_embeddings( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class AlbertModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_no_head_absolute_embedding( self ):
        '''simple docstring'''
        model = AlbertModel.from_pretrained('albert-base-v2' )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
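# --- Editor's hedged aside (not part of the original test file) ---
# Minimal standalone sketch of the integration check above, runnable outside the
# unittest harness; the checkpoint and token ids are the ones the test already uses.
if __name__ == "__main__":
    model = AlbertModel.from_pretrained('albert-base-v2' )
    input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
    with torch.no_grad():
        output = model(input_ids )[0]
    print(output.shape )  # expected: torch.Size([1, 11, 768])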
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path , articles: list ):
    '''simple docstring'''
    content = '\n'.join(articles )
    Path(path ).open('w' ).writelines(content )


T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class TestTheRest( TestCasePlus ):
    def run_eval_tester( self , model ):
        '''simple docstring'''
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        articles = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
        _dump_articles(input_file_name , articles )
        score_path = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = F'''
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        '''.split()
        with patch.object(sys , 'argv' , testargs ):
            run_generate()
            assert Path(output_file_name ).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval( self ):
        '''simple docstring'''
        self.run_eval_tester(T5_TINY )

    @parameterized.expand([BART_TINY, MBART_TINY] )
    @slow
    def test_run_eval_slow( self , model ):
        '''simple docstring'''
        self.run_eval_tester(model )
    @parameterized.expand([T5_TINY, MBART_TINY] )
    @slow
    def test_run_eval_search( self , model ):
        '''simple docstring'''
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        text = {
            'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
            'de': [
                'Maschinelles Lernen ist großartig, oder?',
                'Ich esse gerne Bananen',
                'Morgen ist wieder ein toller Tag!',
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir() )
        score_path = str(tmp_dir / 'scores.json' )
        reference_path = str(tmp_dir / 'val.target' )
        _dump_articles(input_file_name , text['en'] )
        _dump_articles(reference_path , text['de'] )
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = F'''
            run_eval_search.py
            {model}
            {str(input_file_name )}
            {str(output_file_name )}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        '''.split()
        testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
        with patch.object(sys , 'argv' , testargs ):
            with CaptureStdout() as cs:
                run_search()
        expected_strings = [' num_beams | length_penalty', model, 'Best score args']
        un_expected_strings = ['Info']
        if "translation" in task:
            # a BLEU score is returned, not a ROUGE
            expected_strings.append('bleu' )
        else:
            expected_strings.extend(ROUGE_KEYS )
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(output_file_name ).exists()
        os.remove(Path(output_file_name ) )
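# --- Editor's hedged aside (not part of the original test file) ---
# The tests above drive CLI scripts by patching sys.argv. A toy, self-contained
# sketch of that pattern (the script name and flags here are made up):
if __name__ == "__main__":
    def _toy_main():
        print('argv seen by the script:' , sys.argv )

    with patch.object(sys , 'argv' , ['toy_script.py', '--num_beams', '2'] ):
        _toy_main()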
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig( PretrainedConfig ):
    '''simple docstring'''

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = '''nezha'''

    def __init__( self , vocab_size=21128 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , max_relative_position=64 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , classifier_dropout=0.1 , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , use_cache=True , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
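# --- Editor's hedged usage sketch (not part of the original file) ---
# Constructing the config above with an override and reading attributes back.
if __name__ == "__main__":
    config = NezhaConfig(hidden_size=256 , num_attention_heads=4 )
    print(config.hidden_size , config.max_relative_position , config.use_cache )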
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__UpperCamelCase = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples( unittest.TestCase ):
    def analyze_directory( self , directory : Path , identifier : Union[str, None] = None , n_identifier : Union[List[str], None] = None , ignore_files : Union[str, List[str], None] = None , only_modules : bool = True , ) -> None:
        """simple docstring"""
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , List ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("""__init__.py""" )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("""Testing""" , file )
            if only_modules:
                module_identifier = file.split(""".""" )[0]
                try:
                    module_identifier = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module_identifier )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(f'''{module_identifier} is not a module.''' )
            else:
                result = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )

    def test_modeling_examples( self ):
        """simple docstring"""
        directory = Path("""src/transformers""" )
        identifier = """modeling"""
        ignore_files = [
            """modeling_ctrl.py""",
            """modeling_tf_ctrl.py""",
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )

    def test_tokenization_examples( self ):
        """simple docstring"""
        directory = Path("""src/transformers""" )
        identifier = """tokenization"""
        self.analyze_directory(directory , identifier=identifier )

    def test_configuration_examples( self ):
        """simple docstring"""
        directory = Path("""src/transformers""" )
        identifier = """configuration"""
        self.analyze_directory(directory , identifier=identifier )

    def test_remaining_examples( self ):
        """simple docstring"""
        directory = Path("""src/transformers""" )
        n_identifiers = ["""configuration""", """modeling""", """tokenization"""]
        self.analyze_directory(directory , n_identifier=n_identifiers )

    def test_doc_sources( self ):
        """simple docstring"""
        directory = Path("""docs/source""" )
        ignore_files = ["""favicon.ico"""]
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
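# --- Editor's hedged aside (not part of the original test file) ---
# Standalone sketch of the doctest machinery used by analyze_directory: run the
# examples found in a toy, in-memory module's docstring and check for failures.
if __name__ == "__main__":
    import types

    toy_module = types.ModuleType('toy_module' )
    toy_module.__doc__ = '>>> 1 + 1\n2\n'
    results = doctest.testmod(toy_module , verbose=False )
    assert results.failed == 0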
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer( nn.Module ):
    """simple docstring"""

    def __init__( self , in_channels , out_channels , kernel_size = 3 , stride = 1 , activation = "relu" ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels , out_channels , kernel_size=kernel_size , stride=stride , padding=kernel_size // 2 , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward( self , input ):
        hidden_state = self.convolution(input )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state


class ResNetEmbeddings( nn.Module ):
    """simple docstring"""

    def __init__( self , config ):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
        self.pooler = nn.MaxPool2d(kernel_size=3 , stride=2 , padding=1 )
        self.num_channels = config.num_channels

    def forward( self , pixel_values ):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values matches the one set in the configuration." )
        embedding = self.embedder(pixel_values )
        embedding = self.pooler(embedding )
        return embedding
class ResNetShortCut( nn.Module ):
    """simple docstring"""

    def __init__( self , in_channels , out_channels , stride = 2 ):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels , out_channels , kernel_size=1 , stride=stride , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )

    def forward( self , input ):
        hidden_state = self.convolution(input )
        hidden_state = self.normalization(hidden_state )
        return hidden_state


class ResNetBasicLayer( nn.Module ):
    """simple docstring"""

    def __init__( self , in_channels , out_channels , stride = 1 , activation = "relu" ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels , out_channels , stride=stride ) , ResNetConvLayer(out_channels , out_channels , activation=None ) , )
        self.activation = ACT2FN[activation]

    def forward( self , hidden_state ):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class ResNetBottleNeckLayer( nn.Module ):
    """simple docstring"""

    def __init__( self , in_channels , out_channels , stride = 1 , activation = "relu" , reduction = 4 ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels , reduces_channels , kernel_size=1 ) , ResNetConvLayer(reduces_channels , reduces_channels , stride=stride ) , ResNetConvLayer(reduces_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACT2FN[activation]

    def forward( self , hidden_state ):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state


class ResNetStage( nn.Module ):
    """simple docstring"""

    def __init__( self , config , in_channels , out_channels , stride = 2 , depth = 2 , ):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels , out_channels , stride=stride , activation=config.hidden_act ) , *[layer(out_channels , out_channels , activation=config.hidden_act ) for _ in range(depth - 1 )] , )

    def forward( self , input ):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state )
        return hidden_state
class ResNetEncoder( nn.Module ):
    """simple docstring"""

    def __init__( self , config ):
        super().__init__()
        self.stages = nn.ModuleList([] )
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:] ):
            self.stages.append(ResNetStage(config , in_channels , out_channels , depth=depth ) )

    def forward( self , hidden_state , output_hidden_states = False , return_dict = True ):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state , hidden_states=hidden_states , )


class ResNetPreTrainedModel( PreTrainedModel ):
    """simple docstring"""

    config_class = ResNetConfig
    base_model_prefix = 'resnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True

    def _init_weights( self , module ):
        if isinstance(module , nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" )
        elif isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )

    def _set_gradient_checkpointing( self , module , value=False ):
        if isinstance(module , ResNetEncoder ):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    'The bare ResNet model outputting raw features without any specific head on top.' , RESNET_START_DOCSTRING , )
class ResNetModel( ResNetPreTrainedModel ):
    """simple docstring"""

    def __init__( self , config ):
        super().__init__(config )
        self.config = config
        self.embedder = ResNetEmbeddings(config )
        self.encoder = ResNetEncoder(config )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values , output_hidden_states = None , return_dict = None ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    '\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ' , RESNET_START_DOCSTRING , )
class ResNetForImageClassification( ResNetPreTrainedModel ):
    """simple docstring"""

    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config )
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values = None , labels = None , output_hidden_states = None , return_dict = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
@add_start_docstrings(
    '\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    ' , RESNET_START_DOCSTRING , )
class ResNetBackbone( ResNetPreTrainedModel , BackboneMixin ):
    """simple docstring"""

    def __init__( self , config ):
        super().__init__(config )
        super()._init_backbone(config )
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config )
        self.encoder = ResNetEncoder(config )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @replace_return_docstrings(output_type=BackboneOutput , config_class=_CONFIG_FOR_DOC )
    def forward( self , pixel_values , output_hidden_states = None , return_dict = None ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values )
        outputs = self.encoder(embedding_output , output_hidden_states=True , return_dict=True )
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names ):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=None , )
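# --- Editor's hedged smoke test (not part of the original module) ---
# Runs a tiny, randomly initialized ResNet on random pixels; the config values
# below are arbitrary small numbers chosen for the sketch.
if __name__ == "__main__":
    config = ResNetConfig(embedding_size=16 , hidden_sizes=[32, 64] , depths=[1, 1] , layer_type="basic" )
    model = ResNetModel(config )
    pixel_values = torch.randn(1 , 3 , 64 , 64 )
    with torch.no_grad():
        outputs = model(pixel_values )
    print(outputs.last_hidden_state.shape , outputs.pooler_output.shape )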
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin , accelerator , model , output_dir , model_index=0 ):
    os.makedirs(output_dir , exist_ok=True )
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
            output_model_file = os.path.join(output_dir , weights_name )
            if accelerator.process_index == 0:
                logger.info(f'''Saving model to {output_model_file}''' )
                torch.save(state_dict , output_model_file )
                logger.info(f'''Model saved to {output_model_file}''' )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
                if model_index == 0
                else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
            )
            output_model_file = os.path.join(output_dir , weights_name )
            logger.info(f'''Saving model to {output_model_file}''' )
            torch.save(state_dict , output_model_file )
            logger.info(f'''Model saved to {output_model_file}''' )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir , f'''{MODEL_NAME}_{model_index}''' )
            os.makedirs(ckpt_dir , exist_ok=True )
            logger.info(f'''Saving model to {ckpt_dir}''' )
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict , storage_writer=dist_cp.FileSystemWriter(ckpt_dir ) , planner=DefaultSavePlanner() , )
            logger.info(f'''Model saved to {ckpt_dir}''' )
def load_fsdp_model(fsdp_plugin , accelerator , model , input_dir , model_index=0 ):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model ) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object" )
                return
            weights_name = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
            input_model_file = os.path.join(input_dir , weights_name )
            logger.info(f'''Loading model from {input_model_file}''' )
            state_dict = torch.load(input_model_file )
            logger.info(f'''Model loaded from {input_model_file}''' )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
                if model_index == 0
                else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
            )
            input_model_file = os.path.join(input_dir , weights_name )
            logger.info(f'''Loading model from {input_model_file}''' )
            state_dict = torch.load(input_model_file )
            logger.info(f'''Model loaded from {input_model_file}''' )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir , f'''{MODEL_NAME}_{model_index}''' )
                if f'''{MODEL_NAME}''' not in input_dir
                else input_dir
            )
            logger.info(f'''Loading model from {ckpt_dir}''' )
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict , storage_reader=dist_cp.FileSystemReader(ckpt_dir ) , planner=DefaultLoadPlanner() , )
            state_dict = state_dict["model"]
            logger.info(f'''Model loaded from {ckpt_dir}''' )
    model.load_state_dict(state_dict )
def save_fsdp_optimizer(fsdp_plugin , accelerator , optimizer , model , output_dir , optimizer_index=0 ):
    os.makedirs(output_dir , exist_ok=True )
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        optim_state = FSDP.optim_state_dict(model , optimizer )
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
                )
                output_optimizer_file = os.path.join(output_dir , optim_state_name )
                logger.info(f'''Saving Optimizer state to {output_optimizer_file}''' )
                torch.save(optim_state , output_optimizer_file )
                logger.info(f'''Optimizer state saved in {output_optimizer_file}''' )
        else:
            ckpt_dir = os.path.join(output_dir , f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
            os.makedirs(ckpt_dir , exist_ok=True )
            logger.info(f'''Saving Optimizer state to {ckpt_dir}''' )
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(ckpt_dir ) , planner=DefaultSavePlanner() , )
            logger.info(f'''Optimizer state saved in {ckpt_dir}''' )
def load_fsdp_optimizer(fsdp_plugin , accelerator , optimizer , model , input_dir , optimizer_index=0 ):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
            )
            input_optimizer_file = os.path.join(input_dir , optimizer_name )
            logger.info(f'''Loading Optimizer state from {input_optimizer_file}''' )
            optim_state = torch.load(input_optimizer_file )
            logger.info(f'''Optimizer state loaded from {input_optimizer_file}''' )
        else:
            ckpt_dir = (
                os.path.join(input_dir , f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
                if f'''{OPTIMIZER_NAME}''' not in input_dir
                else input_dir
            )
            logger.info(f'''Loading Optimizer from {ckpt_dir}''' )
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(ckpt_dir ) , )
            optim_state = optim_state["optimizer"]
            logger.info(f'''Optimizer loaded from {ckpt_dir}''' )
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state , model , optimizer )
        optimizer.load_state_dict(flattened_osd )
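# --- Editor's hedged aside (not part of the original module) ---
# The checkpoint file names the helpers above produce, mirroring their f-strings;
# the index/rank values are arbitrary examples.
if __name__ == "__main__":
    model_index, rank = 1, 3
    print(f'''{MODEL_NAME}.bin''' )                            # FULL_STATE_DICT, model_index == 0
    print(f'''{MODEL_NAME}_{model_index}.bin''' )              # FULL_STATE_DICT, model_index > 0
    print(f'''{MODEL_NAME}_{model_index}_rank{rank}.bin''' )   # LOCAL_STATE_DICT, per-rank shard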
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar('''T''')

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
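# --- Editor's hedged usage sketch (not part of the original module) ---
# The aliases above used in type hints, with a trivially runnable check.
if __name__ == "__main__":
    def normalize(path: PathLike ) -> str:
        return os.fspath(path )

    batch: NestedDataStructureLike[int] = {'a': 1}
    print(normalize('data/train.csv' ) , batch )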
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem( AbstractArchiveFileSystem ):
    """simple docstring"""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__( self , fo: str = "" , target_protocol: Optional[str] = None , target_options: Optional[dict] = None , **kwargs ):
        """simple docstring"""
        super().__init__(self , **kwargs )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo , mode='rb' , protocol=target_protocol , compression=self.compression , client_kwargs={
                'requote_redirect_url': False,  # see https://github.com/huggingface/datasets/pull/5459
                'trust_env': True,  # Enable reading proxy env variables.
                **(target_options or {}).pop('client_kwargs' , {} ),  # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        self.compressed_name = os.path.basename(self.file.path.split('::' )[0] )
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex('.' )]
            if '.' in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol( cls , path ):
        """simple docstring"""
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path ).lstrip('/' )

    def _get_dirs( self ):
        """simple docstring"""
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
            self.dir_cache = {f['name']: f}

    def cat( self , path: str ):
        """simple docstring"""
        return self.file.open().read()

    def _open( self , path: str , mode: str = "rb" , block_size=None , autocommit=True , cache_options=None , **kwargs , ):
        """simple docstring"""
        path = self._strip_protocol(path )
        if mode != "rb":
            raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
        return self.file.open()
class Bz2FileSystem( BaseCompressedFileFileSystem ):
    """simple docstring"""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem( BaseCompressedFileFileSystem ):
    """simple docstring"""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem( BaseCompressedFileFileSystem ):
    """simple docstring"""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem( BaseCompressedFileFileSystem ):
    """simple docstring"""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem( BaseCompressedFileFileSystem ):
    """simple docstring"""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
    def __init__( self , fo: str , mode: str = "rb" , target_protocol: Optional[str] = None , target_options: Optional[dict] = None , block_size: int = DEFAULT_BLOCK_SIZE , **kwargs , ):
        """simple docstring"""
        super().__init__(
            fo=fo , mode=mode , target_protocol=target_protocol , target_options=target_options , block_size=block_size , **kwargs , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            """simple docstring"""

            def __init__( self , file_ ):
                """simple docstring"""
                self._file = file_

            def __enter__( self ):
                """simple docstring"""
                self._file.__enter__()
                return self

            def __exit__( self , *args , **kwargs ):
                """simple docstring"""
                self._file.__exit__(*args , **kwargs )

            def __iter__( self ):
                """simple docstring"""
                return iter(self._file )

            def __next__( self ):
                """simple docstring"""
                return next(self._file )

            def __getattr__( self , attr ):
                """simple docstring"""
                return getattr(self._file , attr )

        def fixed_enter(*args , **kwargs ):
            return WrappedFile(_enter(*args , **kwargs ) )

        self.file.__enter__ = fixed_enter
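# --- Editor's hedged smoke test (not part of the original module) ---
# Writes a gzip file and reads it back through the GzipFileSystem defined above;
# the file name is arbitrary.
if __name__ == "__main__":
    import gzip
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        gz_path = os.path.join(tmp_dir , 'hello.txt.gz' )
        with gzip.open(gz_path , 'wb' ) as f:
            f.write(b'hello compression filesystem' )
        fs = GzipFileSystem(fo=gz_path )
        print(fs.cat(fs.uncompressed_name ) )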
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class FunnelConfig( PretrainedConfig ):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__( self , vocab_size=30522 , block_sizes=[4, 4, 4] , block_repeats=None , num_decoder_layers=2 , d_model=768 , n_head=12 , d_head=64 , d_inner=3072 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , initializer_range=0.1 , initializer_std=None , layer_norm_eps=1e-9 , pooling_type="mean" , attention_type="relative_shift" , separate_cls=True , truncate_seq=True , pool_q_only=True , **kwargs , ) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes ) if block_repeats is None else block_repeats
        assert len(block_sizes ) == len(
            self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], F'''Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported.'''
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], F'''Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported.'''
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs )
    @property
    def num_hidden_layers( self ) -> int:
        """simple docstring"""
        return sum(self.block_sizes )

    @num_hidden_layers.setter
    def num_hidden_layers( self , value ):
        """simple docstring"""
        raise NotImplementedError(
            '''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )

    @property
    def num_blocks( self ) -> int:
        """simple docstring"""
        return len(self.block_sizes )

    @num_blocks.setter
    def num_blocks( self , value ):
        """simple docstring"""
        raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
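# --- Editor's hedged usage sketch (not part of the original file) ---
# `num_hidden_layers` is derived from `block_sizes` and is therefore read-only.
if __name__ == "__main__":
    config = FunnelConfig(block_sizes=[2, 2] , block_repeats=[1, 2] )
    print(config.num_blocks , config.num_hidden_layers )  # 2 4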
from __future__ import annotations
class Node:
    def __init__( self , data ) -> None:
        """simple docstring"""
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None ) -> None:  # In Order traversal of the tree
    """simple docstring"""
    if tree:
        display(tree.left )
        print(tree.data )
        display(tree.right )


def depth_of_tree(tree: Node | None ) -> int:
    """simple docstring"""
    return 1 + max(depth_of_tree(tree.left ), depth_of_tree(tree.right ) ) if tree else 0


def is_full_binary_tree(tree: Node ) -> bool:
    """simple docstring"""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    """simple docstring"""
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.right.left.left.right = Node(9 )

    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print('''Tree is: ''' )
    display(tree )


if __name__ == "__main__":
    main()
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , encoder_stride=2 , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = DeiTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        model = DeiTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': DeiTModel,
            'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = DeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='DeiT does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass

    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_image_modeling( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_training( self ):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING )
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()

    def test_training_gradient_checkpointing( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_problem_types( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
            {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
            {'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
                    config.problem_type = problem_type['title']
                    config.num_labels = problem_type['num_labels']
                    model = model_class(config )
                    model.to(torch_device )
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
                    if problem_type["num_labels"] > 1:
                        inputs['labels'] = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
                    inputs['labels'] = inputs['labels'].to(problem_type['dtype'] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True ) as warning_list:
                        loss = model(**inputs ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F'Something is going wrong in the regression problem: intercepted {w.message}' )
                    loss.backward()

    @slow
    def test_model_from_pretrained( self ):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
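def prepare_random_img():
    # Editor's hedged helper sketch (not part of the original test file): a stand-in
    # for prepare_img() when the COCO fixture is unavailable; any RGB image works
    # for the integration tests below.
    import numpy as np

    return Image.fromarray(np.random.randint(0 , 255 , (480, 640, 3) , dtype=np.uint8 ) )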
@require_torch
@require_vision
class DeiTModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return (
            DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head( self ):
        model = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16( self ):
        model = DeiTModel.from_pretrained(
            'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.float16 , device_map='auto' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' )
        pixel_values = inputs.pixel_values.to(torch_device )

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta_prelayernorm''': [
'''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''RobertaPreLayerNormConfig''',
'''RobertaPreLayerNormOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roberta_prelayernorm'] = [
'''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaPreLayerNormForCausalLM''',
'''RobertaPreLayerNormForMaskedLM''',
'''RobertaPreLayerNormForMultipleChoice''',
'''RobertaPreLayerNormForQuestionAnswering''',
'''RobertaPreLayerNormForSequenceClassification''',
'''RobertaPreLayerNormForTokenClassification''',
'''RobertaPreLayerNormModel''',
'''RobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roberta_prelayernorm'] = [
'''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaPreLayerNormForCausalLM''',
'''TFRobertaPreLayerNormForMaskedLM''',
'''TFRobertaPreLayerNormForMultipleChoice''',
'''TFRobertaPreLayerNormForQuestionAnswering''',
'''TFRobertaPreLayerNormForSequenceClassification''',
'''TFRobertaPreLayerNormForTokenClassification''',
'''TFRobertaPreLayerNormMainLayer''',
'''TFRobertaPreLayerNormModel''',
'''TFRobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roberta_prelayernorm'] = [
'''FlaxRobertaPreLayerNormForCausalLM''',
'''FlaxRobertaPreLayerNormForMaskedLM''',
'''FlaxRobertaPreLayerNormForMultipleChoice''',
'''FlaxRobertaPreLayerNormForQuestionAnswering''',
'''FlaxRobertaPreLayerNormForSequenceClassification''',
'''FlaxRobertaPreLayerNormForTokenClassification''',
'''FlaxRobertaPreLayerNormModel''',
'''FlaxRobertaPreLayerNormPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
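
The `_LazyModule` registration above is the standard `transformers` lazy-import layout: only the `_import_structure` dict is evaluated at import time, and each submodule is loaded on first attribute access. A minimal sketch of the same idea using PEP 562 module-level `__getattr__` (the names `heavy_module` and `expensive_fn` are hypothetical, purely for illustration):

import importlib

# Hypothetical mapping from submodule name to the names it exports.
_import_structure = {"heavy_module": ["expensive_fn"]}

def __getattr__(name):
    # Called only when `name` is not found in the module namespace,
    # so the heavy submodule is imported lazily, on first use.
    for module_name, exported_names in _import_structure.items():
        if name in exported_names:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

This only works when the file lives inside a package (the relative import needs a parent package), which is exactly the situation of the `__init__.py` above.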
| 335
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    '''simple docstring'''

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 492
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
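
A quick usage sketch for the config above (assuming it corresponds to `transformers.RealmConfig`; the asserted values are simply the defaults from the signature):

from transformers import RealmConfig

config = RealmConfig(reader_beam_size=10)  # override a single reader field
assert config.vocab_size == 30522          # default from the signature above
assert config.num_candidates == 8
print(config.to_json_string()[:80])        # configs round-trip through JSON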
| 492
| 1
|
'''simple docstring'''
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word):
    """simple docstring"""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("""encode() accepts only letters of the alphabet and spaces""" )
    return encoded


def decode(coded):
    """simple docstring"""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("""decode() accepts only 'A', 'B' and spaces""" )
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
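
A round-trip sanity check for the cipher above. This table is a 26-letter Baconian variant in which every letter gets a distinct code, so decoding is unambiguous for lowercase input:

message = "hello world"
cipher = encode(message)
assert decode(cipher) == message  # each letter maps to a unique 5-symbol code
print(cipher)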
| 533
|
import os
from collections.abc import Iterator
def _SCREAMING_SNAKE_CASE ( snake_case = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(snake_case ):
_UpperCAmelCase = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(snake_case )[1] in (".py", ".ipynb"):
yield os.path.join(snake_case , snake_case ).lstrip("""./""" )
def _SCREAMING_SNAKE_CASE ( snake_case ) -> Union[str, Any]:
return f"{i * ' '}*" if i else "\n##"
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> str:
_UpperCAmelCase = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(snake_case ) or old_parts[i] != new_part) and new_part:
print(f"{md_prefix(snake_case )} {new_part.replace('_' , ' ' ).title()}" )
return new_path
def _SCREAMING_SNAKE_CASE ( snake_case = "." ) -> None:
_UpperCAmelCase = """"""
for filepath in sorted(good_file_paths(snake_case ) ):
_UpperCAmelCase , _UpperCAmelCase = os.path.split(snake_case )
if filepath != old_path:
_UpperCAmelCase = print_path(snake_case , snake_case )
_UpperCAmelCase = (filepath.count(os.sep ) + 1) if filepath else 0
_UpperCAmelCase = f"{filepath}/{filename}".replace(""" """ , """%20""" )
_UpperCAmelCase = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(f"{md_prefix(snake_case )} [{filename}]({url})" )
if __name__ == "__main__":
print_directory_md(".")
| 518
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]

if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
| 718
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        '''simple docstring'''
        size = size if size is not None else {"""height""": 224, """width""": 224}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        '''simple docstring'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        '''simple docstring'''
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8))
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        '''simple docstring'''
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """size"""))
        self.assertTrue(hasattr(image_processing, """do_center_crop"""))
        self.assertTrue(hasattr(image_processing, """center_crop"""))
        self.assertTrue(hasattr(image_processing, """do_normalize"""))
        self.assertTrue(hasattr(image_processing, """image_mean"""))
        self.assertTrue(hasattr(image_processing, """image_std"""))
        self.assertTrue(hasattr(image_processing, """do_convert_rgb"""))

    def test_image_processor_from_dict_with_kwargs(self):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""height""": 224, """width""": 224})
        self.assertEqual(image_processor.crop_size, {"""height""": 18, """width""": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 42})
        self.assertEqual(image_processor.crop_size, {"""height""": 84, """width""": 84})

    def test_batch_feature(self):
        '''simple docstring'''
        pass

    def test_call_pil(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )

    def test_call_numpy(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )

    def test_call_pytorch(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        '''simple docstring'''
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """size"""))
        self.assertTrue(hasattr(image_processing, """do_center_crop"""))
        self.assertTrue(hasattr(image_processing, """center_crop"""))
        self.assertTrue(hasattr(image_processing, """do_normalize"""))
        self.assertTrue(hasattr(image_processing, """image_mean"""))
        self.assertTrue(hasattr(image_processing, """image_std"""))
        self.assertTrue(hasattr(image_processing, """do_convert_rgb"""))

    def test_batch_feature(self):
        '''simple docstring'''
        pass

    def test_call_pil_four_channels(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )
| 13
| 0
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    '''simple docstring'''
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs,
        )

    def read(self):
        '''simple docstring'''
        # Build streaming (iterable) dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
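
A usage sketch for the reader above (assuming it is `datasets`' `TextDatasetReader`; `lines.txt` is a hypothetical file with one sample per line, and in normal use you would reach this class through `load_dataset("text", ...)`):

from datasets.io.text import TextDatasetReader

reader = TextDatasetReader("lines.txt", keep_in_memory=True)  # hypothetical path
dataset = reader.read()    # builds and returns a map-style Dataset
print(dataset[0]["text"])  # the text builder exposes a single "text" column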
| 59
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = """luke"""

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 416
| 0
|
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations))
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'''The estimated value of pi is {pi_estimate}''')
    print(f'''The numpy value of pi is {pi}''')
    print(f'''The total error is {abs(pi - pi_estimate)}''')


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("""******************""")
    print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''')
    print(f'''Estimated value is {estimated_value}''')
    print(f'''Expected value is {expected_value}''')
    print(f'''Total error is {abs(estimated_value - expected_value)}''')
    print("""******************""")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)

    print("""******************""")
    print("""Estimating pi using area_under_curve_estimator""")
    print(f'''Estimated value is {estimated_value}''')
    print(f'''Expected value is {pi}''')
    print(f'''Total error is {abs(estimated_value - pi)}''')
    print("""******************""")
if __name__ == "__main__":
import doctest
doctest.testmod()
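
A quick demonstration of the three estimators above (sample sizes are arbitrary; the output is stochastic):

pi_estimator(100_000)                         # dart-throwing estimate, error ~1e-2
area_under_line_estimator_check(100_000)      # area of y=x on [0, 1] is 0.5
pi_estimator_using_area_under_curve(100_000)  # integrates sqrt(4 - x^2) on [0, 2]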
| 113
|
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations))
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'''The estimated value of pi is {pi_estimate}''')
    print(f'''The numpy value of pi is {pi}''')
    print(f'''The total error is {abs(pi - pi_estimate)}''')


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("""******************""")
    print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''')
    print(f'''Estimated value is {estimated_value}''')
    print(f'''Expected value is {expected_value}''')
    print(f'''Total error is {abs(estimated_value - expected_value)}''')
    print("""******************""")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)

    print("""******************""")
    print("""Estimating pi using area_under_curve_estimator""")
    print(f'''Estimated value is {estimated_value}''')
    print(f'''Expected value is {pi}''')
    print(f'''Total error is {abs(estimated_value - pi)}''')
    print("""******************""")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 113
| 1
|
"""simple docstring"""
import torch
def main():
    """simple docstring"""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F'''Successfully ran on {num_gpus} GPUs''')


if __name__ == "__main__":
    main()
| 584
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/rembert': 256,
}


class RemBertTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.sp_model)

    def get_vocab(self):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        """simple docstring"""
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
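
Usage sketch (assuming the class corresponds to `transformers.RemBertTokenizer`; loading `google/rembert` downloads the sentencepiece model and requires network access):

from transformers import RemBertTokenizer

tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
ids = tokenizer("Hello world").input_ids      # [CLS] ... [SEP] added automatically
print(tokenizer.convert_ids_to_tokens(ids))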
| 584
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
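
A worked example of the helper above: with `scale_factor=8`, the requested pixel size is divided by `scale_factor**2 = 64`, rounded up, then multiplied back by the scale factor, so 768x768 maps to 96x96 and sizes that are not multiples of 64 round upward:

assert downscale_height_and_width(768, 768, 8) == (96, 96)  # 768 // 64 = 12; 12 * 8 = 96
assert downscale_height_and_width(520, 520, 8) == (72, 72)  # 520 // 64 = 8, remainder -> 9; 9 * 8 = 72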
class KandinskyV22Pipeline(DiffusionPipeline):
    '''simple docstring'''

    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        '''simple docstring'''
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        '''simple docstring'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f'''cuda:{gpu_id}''')
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        '''simple docstring'''
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f'''cuda:{gpu_id}''')

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        '''simple docstring'''
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        '''simple docstring'''
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''')

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 204
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
lowerCAmelCase_ : Optional[int] = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 204
| 1
|
"""simple docstring"""
class Graph:
    def __init__(self):
        '''simple docstring'''
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        '''simple docstring'''
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        '''simple docstring'''
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        '''simple docstring'''
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        '''simple docstring'''
        string = """"""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"""{head} -> {tail} == {weight}\n"""
        return string.rstrip("""\n""")

    def get_edges(self):
        '''simple docstring'''
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        '''simple docstring'''
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        '''simple docstring'''
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind(object):
        def __init__(self):
            '''simple docstring'''
            self.parent = {}
            self.rank = {}

        def __len__(self):
            '''simple docstring'''
            return len(self.parent)

        def make_set(self, item):
            '''simple docstring'''
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            '''simple docstring'''
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            '''simple docstring'''
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        '''simple docstring'''
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)

                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
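
A small end-to-end check of the classes above (weights must be made distinct before running Boruvka's algorithm):

g = Graph.build(vertices=[0, 1, 2, 3], edges=[[0, 1, 1], [0, 2, 1], [2, 3, 1]])
g.distinct_weight()          # Boruvka assumes all edge weights are distinct
mst = Graph.boruvka_mst(g)
print(mst)                   # three edges spanning all four vertices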
| 156
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = 'llama'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs,
        )

    def _rope_scaling_validation(self):
        '''simple docstring'''
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                f"""got {self.rope_scaling}""")
        rope_scaling_type = self.rope_scaling.get("""type""", None)
        rope_scaling_factor = self.rope_scaling.get("""factor""", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""")
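
An example of a `rope_scaling` payload that passes the validation above (the values are illustrative):

config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
assert config.max_position_embeddings == 2048  # scaling is applied at run time, not stored here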
| 156
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=4_00,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {'''shortest_edge''': 18}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''

    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''image_mean'''))
        self.assertTrue(hasattr(image_processing, '''image_std'''))
        self.assertTrue(hasattr(image_processing, '''do_normalize'''))
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''do_center_crop'''))
        self.assertTrue(hasattr(image_processing, '''size'''))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 18})
        self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 42})
        self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )
| 208
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> str | Literal[False]:
snake_case__ = list(__lowerCAmelCase )
snake_case__ = list(__lowerCAmelCase )
snake_case__ = 0
for i in range(len(__lowerCAmelCase ) ):
if lista[i] != lista[i]:
count += 1
snake_case__ = '''_'''
if count > 1:
return False
else:
return "".join(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> list[str]:
snake_case__ = []
while True:
snake_case__ = ['''$'''] * len(__lowerCAmelCase )
snake_case__ = []
for i in range(len(__lowerCAmelCase ) ):
for j in range(i + 1 , len(__lowerCAmelCase ) ):
snake_case__ = compare_string(binary[i] , binary[j] )
if k is False:
snake_case__ = '''*'''
snake_case__ = '''*'''
temp.append('''X''' )
for i in range(len(__lowerCAmelCase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(__lowerCAmelCase ) == 0:
return pi
snake_case__ = list(set(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> list[str]:
snake_case__ = []
for minterm in minterms:
snake_case__ = ''''''
for _ in range(__lowerCAmelCase ):
snake_case__ = str(minterm % 2 ) + string
minterm //= 2
temp.append(__lowerCAmelCase )
return temp
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> bool:
snake_case__ = list(__lowerCAmelCase )
snake_case__ = list(__lowerCAmelCase )
snake_case__ = 0
for i in range(len(__lowerCAmelCase ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> list[str]:
snake_case__ = []
snake_case__ = [0] * len(__lowerCAmelCase )
for i in range(len(chart[0] ) ):
snake_case__ = 0
snake_case__ = -1
for j in range(len(__lowerCAmelCase ) ):
if chart[j][i] == 1:
count += 1
snake_case__ = j
if count == 1:
snake_case__ = 1
for i in range(len(__lowerCAmelCase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(__lowerCAmelCase ) ):
snake_case__ = 0
temp.append(prime_implicants[i] )
while True:
snake_case__ = 0
snake_case__ = -1
snake_case__ = 0
for i in range(len(__lowerCAmelCase ) ):
snake_case__ = chart[i].count(1 )
if count_n > max_n:
snake_case__ = count_n
snake_case__ = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(__lowerCAmelCase ) ):
snake_case__ = 0
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> list[list[int]]:
snake_case__ = [[0 for x in range(len(__lowerCAmelCase ) )] for x in range(len(__lowerCAmelCase ) )]
for i in range(len(__lowerCAmelCase ) ):
snake_case__ = prime_implicants[i].count('''_''' )
for j in range(len(__lowerCAmelCase ) ):
if is_for_table(prime_implicants[i] , binary[j] , __lowerCAmelCase ):
snake_case__ = 1
return chart
def SCREAMING_SNAKE_CASE ( ) -> None:
snake_case__ = int(input('''Enter the no. of variables\n''' ) )
snake_case__ = [
float(__lowerCAmelCase )
for x in input(
'''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
]
snake_case__ = decimal_to_binary(__lowerCAmelCase , __lowerCAmelCase )
snake_case__ = check(__lowerCAmelCase )
print('''Prime Implicants are:''' )
print(__lowerCAmelCase )
snake_case__ = prime_implicant_chart(__lowerCAmelCase , __lowerCAmelCase )
snake_case__ = selection(__lowerCAmelCase , __lowerCAmelCase )
print('''Essential Prime Implicants are:''' )
print(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
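# A minimal non-interactive sketch of the pipeline above (the `_demo` helper is
# hypothetical, not part of the original script): minimize a 3-variable function
# with minterms 0, 1, 2 and 5. Integer minterms keep the strings to '0'/'1' digits.
def _demo() -> None:
    binary = decimal_to_binary(3, [0, 1, 2, 5])  # ['000', '001', '010', '101']
    prime_implicants = check(binary)  # merge terms that differ in a single bit
    chart = prime_implicant_chart(prime_implicants, binary)
    print(selection(chart, prime_implicants))  # e.g. ['0_0', '_01']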
| 208
| 1
|
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase = get_tests_dir('fixtures/dummy-config.json')
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def setUp(self) -> None:
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
a_ : List[Any] = AutoConfig.from_pretrained('''bert-base-uncased''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
a_ : Optional[int] = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
a_ : List[str] = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
a_ : Any = AutoConfig.for_model('''roberta''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
a_ : List[Any] = os.path.join(UpperCamelCase_ , '''fake-roberta''' )
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , '''config.json''' ) , '''w''' ) as f:
f.write(json.dumps({} ) )
a_ : int = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertEqual(type(UpperCamelCase_ ) , UpperCamelCase_ )
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
# Wrong model type will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoConfig.register('''model''' , UpperCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoConfig.register('''bert''' , UpperCamelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
a_ : List[str] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ )
a_ : str = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
with self.assertRaisesRegex(
UpperCamelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ):
a_ : Any = AutoConfig.from_pretrained('''bert-base''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
with self.assertRaisesRegex(
UpperCamelCase_ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
a_ : Union[str, Any] = AutoConfig.from_pretrained(UpperCamelCase_ , revision='''aaaaaa''' )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
with self.assertRaisesRegex(
UpperCamelCase_ , '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' , ):
a_ : Optional[Any] = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase_ ):
a_ : int = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase_ ):
a_ : int = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=UpperCamelCase_ )
a_ : Optional[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=UpperCamelCase_ )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ )
a_ : List[str] = AutoConfig.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ )
self.assertEqual(reloaded_config.__class__.__name__ , '''NewModelConfig''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
class SCREAMING_SNAKE_CASE ( lowercase__ ):
snake_case__ = """new-model"""
try:
AutoConfig.register('''new-model''' , UpperCamelCase_ )
# If remote code is not set, the default is to use local
a_ : Optional[int] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote code is disabled, we load the local one.
a_ : str = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=UpperCamelCase_ )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote is enabled, we load from the Hub
a_ : Tuple = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=UpperCamelCase_ )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
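# A minimal sketch of the registration flow these tests exercise; `MyConfig`
# and the "my-model" type are hypothetical names, not part of the library.
def _register_custom_config_sketch():
    from transformers import AutoConfig, PretrainedConfig

    class MyConfig(PretrainedConfig):
        model_type = "my-model"

    AutoConfig.register("my-model", MyConfig)
    # Once registered, the auto-API resolves the new type like any built-in one.
    return AutoConfig.for_model("my-model")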
| 466
|
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    """Recursively print the structure of a (possibly nested) checkpoint dict."""
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """Permute a fused QKV parameter so later Megatron-LM layouts match transformers."""
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
def convert_megatron_checkpoint(args, input_state_dict, config):
    """Convert a Megatron-LM GPT-2 state dict into the transformers layout."""
    output_state_dict = {}
    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)
    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0
    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]
    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings
    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings
    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }
    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)
        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val
    # DEBUG.
    assert config.n_layer == layer_idx + 1
    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]
    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings
    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()
    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")
    ds_args = input_state_dict.get("args", None)
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"
        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)
    config.architectures = ["GPT2LMHeadModel"]
    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)
    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
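# A small, self-contained illustration (toy shapes, hypothetical `_qkv_reordering_demo`
# helper) of what fix_query_key_value_ordering does for checkpoint_version >= 2.0:
# Megatron stores the fused QKV matrix as [num_heads * num_splits * head_dim, :],
# and the permutation makes the three splits (Q, K, V) contiguous on the first axis.
def _qkv_reordering_demo():
    num_heads, head_dim, cols = 2, 4, 8
    param = torch.arange(num_heads * 3 * head_dim * cols, dtype=torch.float32)
    param = param.view(num_heads * 3 * head_dim, cols)
    out = fix_query_key_value_ordering(param, 2.0, 3, num_heads, head_dim)
    assert out.shape == param.shape  # same shape, rows permuted
    return out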
| 257
| 0
|
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the new default level;
    otherwise fall back to ``_default_log_level``.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name; defaults to the library root name."""
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current verbosity level of the library's root logger as an int."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level of the library's root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    """Disable the default handler of the library's root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Enable the default handler of the library's root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """Add a handler to the library's root logger."""
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Remove the given handler from the library's root logger."""
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Switch every handler of the root logger to an explicit, verbose format."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Reset every handler of the root logger to the default (no) format."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """Like ``logger.warning()``, but silenced when TRANSFORMERS_NO_ADVISORY_WARNINGS is set."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Identical to ``logger.warning()``, but emits a given message only once."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
def __iter__( self ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self , lowercase__ ):
'''simple docstring'''
def empty_fn(*lowercase__ , **lowercase__ ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ):
'''simple docstring'''
return self
def __exit__( self , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
return
class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
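# A short usage sketch of the helpers above (`_verbosity_demo` is a hypothetical
# demo function, not part of the module):
def _verbosity_demo() -> None:
    set_verbosity_info()  # raise library-wide verbosity to INFO
    logger = get_logger(__name__)  # module-scoped child logger
    logger.info("now visible")
    logger.warning_once("emitted only once per distinct message")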
| 516
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowercase_ = ViTImageProcessor if is_vision_available() else None
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =(3, 3_2, 1_2_8)
__A =tempfile.mkdtemp()
# fmt: off
__A =['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
__A =dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
__A =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase__ ) + '''\n''' )
__A ={
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 3_2, '''width''': 1_2_8},
}
__A =os.path.join(self.tmpdirname , lowercase__ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(lowercase__ , lowercase__ )
def __UpperCamelCase ( self , **lowercase__ ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def __UpperCamelCase ( self , **lowercase__ ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase__ )
def __UpperCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )
__A =Image.fromarray(np.moveaxis(lowercase__ , 0 , -1 ) )
return image_input
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_tokenizer()
__A =self.get_image_processor()
__A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
processor.save_pretrained(self.tmpdirname )
__A =MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase__ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , lowercase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase__ )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_tokenizer()
__A =self.get_image_processor()
__A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
processor.save_pretrained(self.tmpdirname )
__A =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__A =self.get_image_processor(do_normalize=lowercase__ , padding_value=1.0 )
__A =MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowercase__ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , lowercase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase__ )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_image_processor()
__A =self.get_tokenizer()
__A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
__A =self.prepare_image_inputs()
__A =image_processor(lowercase__ , return_tensors='''np''' )
__A =processor(images=lowercase__ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_image_processor()
__A =self.get_tokenizer()
__A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
__A ='''test'''
__A =processor(text=lowercase__ )
__A =tokenizer(lowercase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_image_processor()
__A =self.get_tokenizer()
__A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
__A ='''test'''
__A =self.prepare_image_inputs()
__A =processor(text=lowercase__ , images=lowercase__ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(lowercase__ ):
processor()
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_image_processor()
__A =self.get_tokenizer()
__A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
__A =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
__A =processor.char_decode(lowercase__ )
__A =tokenizer.batch_decode(lowercase__ )
__A =[seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(lowercase__ , lowercase__ )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_image_processor()
__A =self.get_tokenizer()
__A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
__A =None
__A =self.prepare_image_inputs()
__A =processor(text=lowercase__ , images=lowercase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_image_processor()
__A =self.get_tokenizer()
__A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
__A =torch.randn(1 , 2_7 , 3_8 )
__A =torch.randn(1 , 2_7 , 5_0_2_5_7 )
__A =torch.randn(1 , 2_7 , 3_0_5_2_2 )
__A =processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
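# A minimal end-to-end sketch of the processor outside the test harness, using
# the public MGP-STR checkpoint; treat the exact class/key names here as
# assumptions rather than guarantees.
def _mgpstr_inference_sketch(image):
    from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

    processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
    model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    outputs = model(pixel_values)
    # batch_decode fuses the char/bpe/wp heads into final text predictions.
    return processor.batch_decode(outputs.logits)["generated_text"]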
| 516
| 1
|
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname, overwrite: bool = False):
    """Sort one auto-mapping file in place, or report that it needs sorting."""
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()
    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1
    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    """Sort every mapping in the auto module; raise when check-only finds unsorted ones."""
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]
    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    sort_all_auto_mappings(not args.check_only)
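# A quick check of sort_auto_mapping on a throwaway file (hypothetical content;
# the real script only ever runs over src/transformers/models/auto):
def _sort_demo() -> bool:
    import tempfile

    body = 'X_MAPPING_NAMES = OrderedDict(\n    [\n        ("beta", "B"),\n        ("alpha", "A"),\n    ]\n)\n'
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
        f.write(body)
    return sort_auto_mapping(f.name)  # True: the entries are out of order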
| 190
|
import math
def prime_sieve(n: int) -> list:
    """Odd-only sieve of Eratosthenes; returns all primes below n."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit: int = 99_99_66_66_33_33) -> int:
    """Sum all semidivisible numbers not exceeding the limit (Project Euler 234)."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
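# Sanity check with a small limit (hypothetical helper): per the Project Euler
# 234 statement, the 92 semidivisible numbers not exceeding 1000 sum to 34825.
def _solution_sanity_check() -> None:
    assert solution(1000) == 34825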
| 190
| 1
|
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase(metaclass=DummyObject):
    _backends = ["torch", "scipy"]
def __init__( self: List[str] , *_lowerCAmelCase: Tuple , **_lowerCAmelCase: str ):
requires_backends(self , ["torch", "scipy"] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls: Tuple , *_lowerCAmelCase: List[str] , **_lowerCAmelCase: List[Any] ):
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls: List[str] , *_lowerCAmelCase: Optional[int] , **_lowerCAmelCase: str ):
requires_backends(cls , ["torch", "scipy"] )
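# A short sketch of what this dummy-object pattern buys: the placeholder class
# imports cleanly even when torch/scipy are absent, while instantiating it (or
# calling any classmethod on it) raises an ImportError that names the missing
# backends. `DummyModel` is a hypothetical illustration, not a library class.
class DummyModel(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])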
| 453
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
_UpperCAmelCase : List[str] = random.Random()
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase=1.0, lowerCamelCase=None, lowerCamelCase=None ):
if rng is None:
lowercase :Union[str, Any] = global_rng
lowercase :Any = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __lowerCAmelCase ( unittest.TestCase):
def __init__( self: Optional[Any] , _lowerCAmelCase: Tuple , _lowerCAmelCase: Union[str, Any]=7 , _lowerCAmelCase: str=4_00 , _lowerCAmelCase: List[str]=20_00 , _lowerCAmelCase: Dict=20_48 , _lowerCAmelCase: Any=1_28 , _lowerCAmelCase: Any=1 , _lowerCAmelCase: List[Any]=5_12 , _lowerCAmelCase: Optional[int]=30 , _lowerCAmelCase: List[Any]=4_41_00 , ):
lowercase :Any = parent
lowercase :Any = batch_size
lowercase :Optional[int] = min_seq_length
lowercase :Optional[Any] = max_seq_length
lowercase :Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowercase :List[str] = spectrogram_length
lowercase :int = feature_size
lowercase :Union[str, Any] = num_audio_channels
lowercase :Optional[int] = hop_length
lowercase :str = chunk_length
lowercase :List[str] = sampling_rate
def SCREAMING_SNAKE_CASE ( self: str ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def SCREAMING_SNAKE_CASE ( self: int , _lowerCAmelCase: int=False , _lowerCAmelCase: int=False ):
def _flatten(_lowerCAmelCase: Optional[Any] ):
return list(itertools.chain(*_lowerCAmelCase ) )
if equal_length:
lowercase :Any = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowercase :Tuple = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowercase :Optional[int] = [np.asarray(_lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowerCAmelCase ( lowerCAmelCase , unittest.TestCase):
_a = TvltFeatureExtractor
def SCREAMING_SNAKE_CASE ( self: Tuple ):
lowercase :Dict = TvltFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE ( self: str ):
lowercase :List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_lowerCAmelCase , "spectrogram_length" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "feature_size" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "num_audio_channels" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "hop_length" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "chunk_length" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "sampling_rate" ) )
def SCREAMING_SNAKE_CASE ( self: str ):
lowercase :Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase :Tuple = feat_extract_first.save_pretrained(_lowerCAmelCase )[0]
check_json_file_has_correct_format(_lowerCAmelCase )
lowercase :Tuple = self.feature_extraction_class.from_pretrained(_lowerCAmelCase )
lowercase :Optional[Any] = feat_extract_first.to_dict()
lowercase :List[str] = feat_extract_second.to_dict()
lowercase :Any = dict_first.pop("mel_filters" )
lowercase :Any = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Tuple ):
lowercase :Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase :Any = os.path.join(_lowerCAmelCase , "feat_extract.json" )
feat_extract_first.to_json_file(_lowerCAmelCase )
lowercase :List[Any] = self.feature_extraction_class.from_json_file(_lowerCAmelCase )
lowercase :Tuple = feat_extract_first.to_dict()
lowercase :int = feat_extract_second.to_dict()
lowercase :Tuple = dict_first.pop("mel_filters" )
lowercase :Tuple = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: int ):
# Initialize feature_extractor
lowercase :int = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
lowercase :List[Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowercase :Union[str, Any] = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
lowercase :Any = feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=4_41_00 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
lowercase :Optional[int] = feature_extractor(_lowerCAmelCase , return_tensors="np" , sampling_rate=4_41_00 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
lowercase :Dict = feature_extractor(
_lowerCAmelCase , return_tensors="np" , sampling_rate=4_41_00 , mask_audio=_lowerCAmelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
lowercase :Optional[int] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
lowercase :Union[str, Any] = np.asarray(_lowerCAmelCase )
lowercase :Dict = feature_extractor(_lowerCAmelCase , return_tensors="np" , sampling_rate=4_41_00 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values
        self.assertEqual(audio_values.shape, (1, 1, 1_92, 1_28))
        expected_values = torch.tensor([[-0.30_32, -0.27_08], [-0.44_34, -0.40_07]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_values, atol=1e-4))
| 453
| 1
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.array:
    """Solve dy/dx = ode_func(x, y) with the modified Euler (Heun) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Predictor: one explicit Euler step.
        y_get = y[k] + step_size * ode_func(x, y[k])
        # Corrector: trapezoidal average of the slopes at both interval ends.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_get))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
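# Usage sketch (hypothetical helper): integrate dy/dx = y from x = 0 with
# y(0) = 1; the final entry approximates e, since Heun's method is second
# order and a 0.01 step is plenty over [0, 1].
def _euler_modified_demo() -> float:
    y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    return float(y[-1])  # ~2.71828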
| 330
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any]=13 , __UpperCAmelCase : Union[str, Any]=7 , __UpperCAmelCase : Dict=True , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Any=True , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Optional[Any]=99 , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : List[str]=5 , __UpperCAmelCase : str=4 , __UpperCAmelCase : int=37 , __UpperCAmelCase : Tuple="gelu" , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : List[str]=0.1 , __UpperCAmelCase : str=128 , __UpperCAmelCase : Optional[int]=32 , __UpperCAmelCase : int=16 , __UpperCAmelCase : Dict=2 , __UpperCAmelCase : Dict=0.02 , __UpperCAmelCase : List[str]=3 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : List[str]=None , ):
'''simple docstring'''
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = scope
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : int ):
'''simple docstring'''
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCAmelCase ( self : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Any ):
'''simple docstring'''
_A = NezhaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
_A = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
_A = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , ):
'''simple docstring'''
_A = True
_A = NezhaModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_A = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
_A = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
_A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict ):
'''simple docstring'''
_A = NezhaForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] ):
'''simple docstring'''
_A = NezhaForNextSentencePrediction(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_A = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
_A = NezhaForPreTraining(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_A = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , next_sentence_label=__UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict ):
'''simple docstring'''
_A = NezhaForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_A = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Any ):
'''simple docstring'''
_A = self.num_labels
_A = NezhaForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Any ):
'''simple docstring'''
_A = self.num_labels
_A = NezhaForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Dict , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : Any , __UpperCAmelCase : str ):
'''simple docstring'''
_A = self.num_choices
_A = NezhaForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
snake_case = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case = (
{
'''feature-extraction''': NezhaModel,
'''fill-mask''': NezhaForMaskedLM,
'''question-answering''': NezhaForQuestionAnswering,
'''text-classification''': NezhaForSequenceClassification,
'''token-classification''': NezhaForTokenClassification,
'''zero-shot''': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case = True
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : int=False ):
'''simple docstring'''
_A = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class in get_values(__UpperCAmelCase ):
_A = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCAmelCase )
_A = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
return inputs_dict
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = NezhaModelTester(self )
_A = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase )
    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 330
| 1
|
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Apply X (NOT) gates to two qubits and measure them on the Aer simulator."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
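# A quick sanity check (a sketch; qiskit's exact Counts representation may
# differ): with X applied to both qubits, every one of the 1000 shots should
# collapse to the state '11', so the printed histogram is expected to be
# {'11': 1000}.
#
#     assert single_qubit_measure(2, 2) == {"11": 1000}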
| 703
|
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        """
        Forward the `audio` argument to the feature extractor and the `text` argument to the tokenizer;
        when in target-processor mode, forward everything to the current processor instead.
        """
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        """Temporarily set the tokenizer as the active processor, for encoding labels."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call)."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
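# A minimal usage sketch of the pattern the deprecation warning above
# recommends (the checkpoint name is an assumption; any Speech2Text checkpoint
# works): labels can be produced in the same call via the `text` argument.
#
#     from transformers import Speech2TextProcessor
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     batch = processor(audio=waveform, sampling_rate=16000, text="a transcript")
#     # batch["labels"] now holds the tokenized transcript ids.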
| 273
| 0
|
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hang in `barrier` calls, you have some network issues; you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Print messages atomically across processes by taking an exclusive lock on this script's own file."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
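# Illustrative: printflock(f"{gpu} reporting") prints atomically even when all
# ranks write at the same moment, because each process must hold the exclusive
# file lock before printing.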
__magic_name__ = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
__magic_name__ = torch.device('''cuda''', local_rank)
__magic_name__ = socket.gethostname()
__magic_name__ = f'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__magic_name__ = dist.get_rank()
__magic_name__ = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
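# A healthy run prints "<hostname>-<rank> is OK (global rank: r/world)" from
# every rank, plus consistent pt/cuda/nccl versions from rank 0; a hang in the
# barrier or an "is broken" message points at the networking or CUDA setup.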
| 657
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
lowercase_ : Dict = ['''torch''', '''torchsde''']
def __init__( self , *a_ , **a_ ) -> Optional[int]:
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> Optional[Any]:
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> List[Any]:
requires_backends(cls , ["torch", "torchsde"] )
| 657
| 1
|
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast tokenizer, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
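# A worked example (a sketch; 4096 is the 7B hidden size): int(8 * 4096 / 3)
# is 10922, and rounding up to the next multiple of 256 gives
# compute_intermediate_size(4096) == 11008, matching INTERMEDIATE_SIZE_MAP["7B"].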
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
print(F"Fetching all parameters from the checkpoint at {input_base_path}." )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
__UpperCAmelCase : List[str] = torch.load(os.path.join(_UpperCAmelCase, "consolidated.00.pth" ), map_location="cpu" )
else:
# Sharded
__UpperCAmelCase : Any = [
torch.load(os.path.join(_UpperCAmelCase, F"consolidated.{i:02d}.pth" ), map_location="cpu" )
for i in range(_UpperCAmelCase )
]
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : Union[str, Any] = {"weight_map": {}}
for layer_i in range(_UpperCAmelCase ):
__UpperCAmelCase : List[Any] = F"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
            state_dict = {
F"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
loaded[F"layers.{layer_i}.attention.wq.weight"] ),
F"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
loaded[F"layers.{layer_i}.attention.wk.weight"] ),
F"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[F"layers.{layer_i}.attention.wv.weight"],
F"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[F"layers.{layer_i}.attention.wo.weight"],
F"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w1.weight"],
F"model.layers.{layer_i}.mlp.down_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w2.weight"],
F"model.layers.{layer_i}.mlp.up_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w3.weight"],
F"model.layers.{layer_i}.input_layernorm.weight": loaded[F"layers.{layer_i}.attention_norm.weight"],
F"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[F"layers.{layer_i}.ffn_norm.weight"],
}
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, so saving attention_norm and ffn_norm would save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )
        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
__UpperCAmelCase : Optional[Any] = F"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
        state_dict = {
"model.embed_tokens.weight": loaded["tok_embeddings.weight"],
"model.norm.weight": loaded["norm.weight"],
"lm_head.weight": loaded["output.weight"],
}
else:
        state_dict = {
"model.norm.weight": loaded[0]["norm.weight"],
"model.embed_tokens.weight": torch.cat(
[loaded[i]["tok_embeddings.weight"] for i in range(_UpperCAmelCase )], dim=1 ),
"lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(_UpperCAmelCase )], dim=0 ),
}
    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))
# Write configs
__UpperCAmelCase : str = {"total_size": param_count * 2}
write_json(_UpperCAmelCase, os.path.join(_UpperCAmelCase, "pytorch_model.bin.index.json" ) )
__UpperCAmelCase : Dict = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
__UpperCAmelCase : Dict = params["multiple_of"] if "multiple_of" in params else 256
__UpperCAmelCase : int = LlamaConfig(
hidden_size=_UpperCAmelCase, intermediate_size=compute_intermediate_size(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ), num_attention_heads=params["n_heads"], num_hidden_layers=params["n_layers"], rms_norm_eps=params["norm_eps"], num_key_value_heads=_UpperCAmelCase, )
config.save_pretrained(_UpperCAmelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("Loading the checkpoint in a Llama model." )
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
# Avoid saving this as part of the config.
del model.config._name_or_path
print("Saving in the Transformers format." )
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
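# Example invocation (a sketch; the script file name and paths are placeholders):
#
#     python convert_llama_weights_to_hf.py \
#         --input_dir /path/to/downloaded/llama --model_size 7B --output_dir /path/to/hf/llama-7b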
| 329
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase__ : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """Zero-shot image classification pipeline: scores an image against free-form candidate labels."""
def __init__( self : List[Any] , **UpperCAmelCase_ : List[str] ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_ )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        """Assign labels to the image(s) passed as inputs."""
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
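# A minimal usage sketch (the CLIP checkpoint name is an assumption; any
# zero-shot-capable image-text model works):
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     classifier("cat.png", candidate_labels=["a cat", "a dog"])
#     # -> [{"score": ..., "label": "a cat"}, {"score": ..., "label": "a dog"}], best label first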
| 329
| 1
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip('''Skip this test while all models are still to be uploaded.''' )
    def test_pretrained_model_lists(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6])

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
@slow
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = {'''input_ids''': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on
    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)
    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)
    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
| 592
|
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """Return True if the partition P(positive_integer) is perfect, i.e. the exponent is integral."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
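# Worked example: for k = 2, sqrt(4 * 2 + 1) = 3, so the exponent is
# log2(3 / 2 + 1 / 2) = log2(2) = 1, an integer -- P(2) is a perfect partition
# (4**1 = 2**1 + 2). For k = 6, log2(sqrt(25) / 2 + 1 / 2) = log2(3) is not an
# integer, so P(6) is not perfect.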
def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0 and perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(F'{solution() = }')
| 592
| 1
|
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # An iterable dataset of unknown, random length: it yields 0, 1, 2, ... and
    # stops with probability `p_stop` after each item (up to `max_length`).
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
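# Illustrative: seeding makes the stream reproducible, e.g.
# random.seed(42); list(RandomIterableDataset(p_stop=0.5)) yields a short,
# randomly sized prefix of 0, 1, 2, ... -- exactly the varying lengths the
# shard checks below have to cope with.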
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
# Check the shards when the dataset is a round multiple of total batch size.
UpperCAmelCase_ : List[str] = BatchSampler(range(24 ) , batch_size=3 , drop_last=_A )
UpperCAmelCase_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_A , _A )
UpperCAmelCase_ : int = BatchSampler(range(24 ) , batch_size=3 , drop_last=_A )
# Expected shouldn't change
self.check_batch_sampler_shards(_A , _A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
UpperCAmelCase_ : str = BatchSampler(range(21 ) , batch_size=3 , drop_last=_A )
UpperCAmelCase_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(_A , _A )
UpperCAmelCase_ : Optional[int] = BatchSampler(range(21 ) , batch_size=3 , drop_last=_A )
UpperCAmelCase_ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
UpperCAmelCase_ : int = BatchSampler(range(22 ) , batch_size=3 , drop_last=_A )
UpperCAmelCase_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(_A , _A )
UpperCAmelCase_ : str = BatchSampler(range(22 ) , batch_size=3 , drop_last=_A )
UpperCAmelCase_ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple
        # of num_processes batches.
UpperCAmelCase_ : List[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=_A )
UpperCAmelCase_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(_A , _A )
UpperCAmelCase_ : int = BatchSampler(range(20 ) , batch_size=3 , drop_last=_A )
UpperCAmelCase_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A )
# Check the shards when the dataset is very small.
UpperCAmelCase_ : Union[str, Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=_A )
UpperCAmelCase_ : int = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(_A , _A )
UpperCAmelCase_ : Optional[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=_A )
UpperCAmelCase_ : Dict = [[], []]
self.check_batch_sampler_shards(_A , _A )
    def test_batch_sampler_shards_with_splits(self):
# Check the shards when the dataset is a round multiple of batch size.
UpperCAmelCase_ : Union[str, Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=_A )
UpperCAmelCase_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
UpperCAmelCase_ : Dict = BatchSampler(range(24 ) , batch_size=4 , drop_last=_A )
# Expected shouldn't change
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
# Check the shards when the dataset is not a round multiple of batch size.
UpperCAmelCase_ : Tuple = BatchSampler(range(22 ) , batch_size=4 , drop_last=_A )
UpperCAmelCase_ : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
UpperCAmelCase_ : Dict = BatchSampler(range(22 ) , batch_size=4 , drop_last=_A )
UpperCAmelCase_ : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
UpperCAmelCase_ : Optional[Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=_A )
UpperCAmelCase_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
UpperCAmelCase_ : Optional[int] = BatchSampler(range(21 ) , batch_size=4 , drop_last=_A )
UpperCAmelCase_ : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
# Check the shards when the dataset is very small.
UpperCAmelCase_ : Tuple = BatchSampler(range(2 ) , batch_size=4 , drop_last=_A )
UpperCAmelCase_ : str = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
UpperCAmelCase_ : Union[str, Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_A )
UpperCAmelCase_ : str = [[], []]
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
    def test_batch_sampler_shards_with_no_splits_no_even(self):
# Check the shards when the dataset is a round multiple of total batch size.
UpperCAmelCase_ : List[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=_A )
UpperCAmelCase_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
UpperCAmelCase_ : int = BatchSampler(range(24 ) , batch_size=3 , drop_last=_A )
# Expected shouldn't change
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
UpperCAmelCase_ : Any = BatchSampler(range(21 ) , batch_size=3 , drop_last=_A )
UpperCAmelCase_ : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
UpperCAmelCase_ : Any = BatchSampler(range(21 ) , batch_size=3 , drop_last=_A )
UpperCAmelCase_ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
UpperCAmelCase_ : int = BatchSampler(range(22 ) , batch_size=3 , drop_last=_A )
UpperCAmelCase_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
UpperCAmelCase_ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=_A )
UpperCAmelCase_ : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple
        # of num_processes batches.
UpperCAmelCase_ : Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=_A )
UpperCAmelCase_ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
UpperCAmelCase_ : str = BatchSampler(range(20 ) , batch_size=3 , drop_last=_A )
UpperCAmelCase_ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
# Check the shards when the dataset is very small.
UpperCAmelCase_ : Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=_A )
UpperCAmelCase_ : Any = [[[0, 1]], []]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
UpperCAmelCase_ : Dict = BatchSampler(range(2 ) , batch_size=3 , drop_last=_A )
UpperCAmelCase_ : Dict = [[], []]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
    def test_batch_sampler_shards_with_splits_no_even(self):
# Check the shards when the dataset is a round multiple of batch size.
UpperCAmelCase_ : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=_A )
UpperCAmelCase_ : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
UpperCAmelCase_ : List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=_A )
# Expected shouldn't change
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
# Check the shards when the dataset is not a round multiple of batch size.
UpperCAmelCase_ : Dict = BatchSampler(range(22 ) , batch_size=4 , drop_last=_A )
UpperCAmelCase_ : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
UpperCAmelCase_ : Any = BatchSampler(range(22 ) , batch_size=4 , drop_last=_A )
UpperCAmelCase_ : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
UpperCAmelCase_ : Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=_A )
UpperCAmelCase_ : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
UpperCAmelCase_ : List[str] = BatchSampler(range(21 ) , batch_size=4 , drop_last=_A )
UpperCAmelCase_ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
# Check the shards when the dataset is very small.
UpperCAmelCase_ : Optional[int] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_A )
UpperCAmelCase_ : Optional[int] = [[[0, 1]], []]
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
UpperCAmelCase_ : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_A )
UpperCAmelCase_ : Optional[int] = [[], []]
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
| 216
|
'''simple docstring'''
from collections import deque
def tarjan(g: list[list[int]]) -> list[list[int]]:
    """
    Tarjan's algorithm for finding the strongly connected components of a
    directed graph, returned as a list of components (each a list of vertices).
    """
    n = len(g)
    stack: deque[int] = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components
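# Illustrative: a 3-cycle collapses into a single component, e.g.
# tarjan(create_graph(3, [(0, 1), (1, 2), (2, 0)])) == [[2, 1, 0]].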
def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
_UpperCamelCase : int = 7
_UpperCamelCase : List[str] = [0, 0, 1, 2, 3, 3, 4, 4, 6]
_UpperCamelCase : Optional[Any] = [1, 3, 2, 0, 1, 4, 5, 6, 5]
_UpperCamelCase : Union[str, Any] = [(u, v) for u, v in zip(source, target)]
_UpperCamelCase : Optional[Any] = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 216
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
snake_case = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ):
"""simple docstring"""
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(__lowerCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
_snake_case = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'''
f""" to this method that includes {self.model_input_names[0]}, but you provided"""
f""" {list(processed_features.keys() )}""" )
_snake_case = processed_features[self.model_input_names[0]]
_snake_case = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__lowerCamelCase ) == 0:
if return_attention_mask:
_snake_case = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
_snake_case = required_input[0]
if isinstance(__lowerCamelCase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
_snake_case = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(__lowerCamelCase ):
_snake_case = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__lowerCamelCase ):
_snake_case = '''tf'''
elif is_torch_tensor(__lowerCamelCase ):
_snake_case = '''pt'''
elif isinstance(__lowerCamelCase , (int, float, list, tuple, np.ndarray) ):
_snake_case = '''np'''
else:
raise ValueError(
f"""type of {first_element} unknown: {type(__lowerCamelCase )}. """
'''Should be one of a python, numpy, pytorch or tensorflow object.''' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
_snake_case = to_numpy(__lowerCamelCase )
else:
_snake_case = [to_numpy(__lowerCamelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
_snake_case = self._get_padding_strategies(padding=__lowerCamelCase , max_length=__lowerCamelCase )
_snake_case = processed_features[self.model_input_names[0]]
_snake_case = len(__lowerCamelCase )
if not all(len(__lowerCamelCase ) == batch_size for v in processed_features.values() ):
raise ValueError('''Some items in the output dictionary have a different batch size than others.''' )
_snake_case = []
for i in range(__lowerCamelCase ):
_snake_case = {k: v[i] for k, v in processed_features.items()}
# truncation
_snake_case = self._truncate(
__lowerCamelCase , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , truncation=__lowerCamelCase , )
truncated_inputs.append(__lowerCamelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
_snake_case = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
_snake_case = PaddingStrategy.MAX_LENGTH
_snake_case = {}
for i in range(__lowerCamelCase ):
# padding
_snake_case = self._pad(
truncated_inputs[i] , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
_snake_case = []
if value.dtype is np.dtype(np.floataa ):
_snake_case = value.astype(np.floataa )
batch_outputs[key].append(__lowerCamelCase )
return BatchFeature(__lowerCamelCase , tensor_type=__lowerCamelCase )
    def _pad(
        self ,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature] ,
        max_length: Optional[int] = None ,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,
        pad_to_multiple_of: Optional[int] = None ,
        return_attention_mask: Optional[bool] = None ,
    ) -> dict:
        """simple docstring"""
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input )

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input ) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features['''attention_mask'''] = np.ones(len(required_input ) , dtype=np.int32 )

        if needs_to_be_padded:
            difference = max_length - len(required_input )

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features['''attention_mask'''] = np.pad(
                        processed_features['''attention_mask'''] , (0, difference) )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , '''constant''' , constant_values=self.padding_value )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features['''attention_mask'''] = np.pad(
                        processed_features['''attention_mask'''] , (difference, 0) )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , '''constant''' , constant_values=self.padding_value )
            else:
                raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )

        return processed_features
    def _truncate(
        self ,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature] ,
        max_length: Optional[int] = None ,
        pad_to_multiple_of: Optional[int] = None ,
        truncation: Optional[bool] = None ,
    ):
        """simple docstring"""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' )

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input ) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features['''attention_mask'''] = processed_features['''attention_mask'''][:max_length]

        return processed_features
    def _get_padding_strategies( self , padding=False , max_length=None ):
        """simple docstring"""
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding , PaddingStrategy ):
                padding_strategy = PaddingStrategy(padding )
            elif isinstance(padding , PaddingStrategy ):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                '''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'''
                ''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' )

        return padding_strategy
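# --- Usage sketch (added): a minimal, hypothetical subclass driving the padding pipeline
# above. The subclass name, its `model_input_names` value, and the sample data are
# illustrative assumptions, not part of the original module.
if __name__ == "__main__":
    class ToyExtractor(UpperCAmelCase ):
        model_input_names = ['''input_values''']

    extractor = ToyExtractor(feature_size=1 , sampling_rate=16_000 , padding_value=0.0 )
    batch = BatchFeature({'''input_values''': [np.ones(3 , dtype=np.float32 ), np.ones(5 , dtype=np.float32 )]} )
    padded = extractor.pad(batch , padding='''longest''' , return_tensors='''np''' )
    print(padded['''input_values'''].shape )  # (2, 5): the shorter sequence is right-padded with 0.0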
| 103
|
import torch
from torch import nn
class A__ ( nn.Module ):
    '''simple docstring'''

    def __init__( self , n_token , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False ):
        """simple docstring"""
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters ) )

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_embed ) ) )
                else:
                    self.out_projs.append(None )

            self.out_layers.append(nn.Linear(d_embed , n_token ) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_emb_i ) ) )
                self.out_layers.append(nn.Linear(d_emb_i , r_idx - l_idx ) )

        self.keep_order = keep_order
    def _compute_logit( self , hidden , weight , bias , proj ):
        """simple docstring"""
        if proj is None:
            logit = nn.functional.linear(hidden , weight , bias=bias )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden , proj.t().contiguous() )
            logit = nn.functional.linear(proj_hid , weight , bias=bias )
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward( self , hidden , labels=None , keep_order=False ):
        """simple docstring"""
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1 , hidden.size(-1 ) )
            labels = labels.view(-1 )
            if hidden.size(0 ) != labels.size(0 ):
                raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
        else:
            hidden = hidden.view(-1 , hidden.size(-1 ) )

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
                out[mask] = (
                    -nn.functional.log_softmax(logit , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
                )
            else:
                out = nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )

                weights.append(weight_i )
                biases.append(bias_i )

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )

            if labels is None:
                out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            else:
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0 , indices_i ) - l_idx
                    head_logprob_i = head_logprob.index_select(0 , indices_i )
                    hidden_i = hidden.index_select(0 , indices_i )
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None] ).squeeze(1 )
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
                        out.index_copy_(0 , indices_i , -logprob_i )
                    else:
                        out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
                    offset += logprob_i.size(0 )

        return out
    def log_prob( self , hidden ):
        """simple docstring"""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )

                weights.append(weight_i )
                biases.append(bias_i )

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )

            out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
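# --- Usage sketch (added): exercising the adaptive softmax above with toy sizes. The
# vocabulary size, cutoffs, and tensor shapes are illustrative assumptions only.
if __name__ == "__main__":
    adaptive_softmax = A__(n_token=10_000 , d_embed=64 , d_proj=64 , cutoffs=[1_000, 5_000] , div_val=1 )
    hidden = torch.randn(2 , 8 , 64 )                 # (batch, seq_len, d_proj)
    labels = torch.randint(0 , 10_000 , (2, 8) )
    nll = adaptive_softmax(hidden , labels )          # per-token negative log-likelihoods
    print(nll.shape )  # torch.Size([14]): 2 * (8 - 1) positions after the one-token shift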
| 280
| 0
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class _lowercase ( SequenceFeatureExtractor ):
    model_input_names = ["""input_features""", """is_longer"""]

    def __init__(
        self ,
        feature_size=64 ,
        sampling_rate=48_000 ,
        hop_length=480 ,
        max_length_s=10 ,
        fft_window_size=1_024 ,
        padding_value=0.0 ,
        return_attention_mask=False ,
        frequency_min: float = 0 ,
        frequency_max: float = 14_000 ,
        top_db: int = None ,
        truncation: str = "fusion" ,
        padding: str = "repeatpad" ,
        **kwargs ,
    ):
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm=None , mel_scale="""htk""" , )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , )
    def to_dict( self ) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__ )
        output['''feature_extractor_type'''] = self.__class__.__name__
        if "mel_filters" in output:
            del output['''mel_filters''']
        if "mel_filters_slaney" in output:
            del output['''mel_filters_slaney''']
        return output

    def _np_extract_fbank_features( self , waveform: np.array , mel_filters: Optional[np.array] = None ) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=mel_filters , log_mel='''dB''' , )
        return log_mel_spectrogram.T
    def _random_mel_fusion( self , mel , total_frames , chunk_frames ):
        ranges = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0] )
        idx_middle = np.random.choice(ranges[1] )
        idx_back = np.random.choice(ranges[2] )

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :] )
        mel_shrink = torch.nn.functional.interpolate(
            mel , size=[chunk_frames, 64] , mode="""bilinear""" , align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion
    def _get_input_mel( self , waveform: np.array , max_length , truncation , padding ) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform ) - max_length
                idx = np.random.randint(0 , overflow + 1 )
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel] , axis=0 )
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel , total_frames , chunk_frames )
                    longer = True
            else:
                raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat ) )
                waveform = np.pad(waveform , (0, max_length - waveform.shape[0]) , mode="""constant""" , constant_values=0 )

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]

        return input_mel, longer
    def __call__(
        self ,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,
        truncation: str = None ,
        padding: Optional[str] = None ,
        max_length: Optional[int] = None ,
        sampling_rate: Optional[int] = None ,
        return_tensors: Optional[Union[str, TensorType]] = None ,
        **kwargs ,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )

        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )

        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float64 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float64 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float32 ):
            raw_speech = raw_speech.astype(np.float64 )

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech )]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform , max_length if max_length else self.nb_max_samples , truncation , padding )
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel )
            is_longer.append(longer )

        if truncation == "fusion" and sum(is_longer ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0 , len(input_mel ) )
            is_longer[rand_idx] = True

        if isinstance(input_mel[0] , list ):
            input_mel = [np.asarray(feature , dtype=np.float64 ) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"""input_features""": input_mel, """is_longer""": is_longer}
        input_features = BatchFeature(input_features )

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors )

        return input_features
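# --- Usage sketch (added): running the extractor above on one second of silence. The
# shapes in the comment assume the default "fusion"/"repeatpad" configuration.
if __name__ == "__main__":
    extractor = _lowercase()
    audio = np.zeros(48_000 , dtype=np.float64 )  # 1 s of silence at 48 kHz
    features = extractor(audio , sampling_rate=48_000 , return_tensors="""np""" )
    print(features['''input_features'''].shape )  # (1, 4, 1001, 64): four stacked mel views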
| 631
|
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class _lowercase ( unittest.TestCase ):
    def get_model_optimizer( self , resolution=32 ):
        set_seed(0 )
        model = UNet2DModel(sample_size=resolution , in_channels=3 , out_channels=3 )
        optimizer = torch.optim.SGD(model.parameters() , lr=0.0_001 )
        return model, optimizer
    @slow
    def test_training_step_equality( self ):
        device = '''cpu'''  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=True , )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=True , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0 )
        clean_images = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(device ) for _ in range(4 )]
        noise = [torch.randn((4, 3, 32, 32) ).to(device ) for _ in range(4 )]
        timesteps = [torch.randint(0 , 1_000 , (4,) ).long().to(device ) for _ in range(4 )]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddpm_noise_pred = model(ddpm_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddim_noise_pred = model(ddim_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images , ddim_noisy_images , atol=1e-5 ) )
        self.assertTrue(torch.allclose(ddpm_noise_pred , ddim_noise_pred , atol=1e-5 ) )
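# --- Added sketch: the test above holds because DDPM and DDIM share the same forward
# (noising) process. A minimal standalone check of that assumption:
if __name__ == "__main__":
    ddpm = DDPMScheduler(num_train_timesteps=1_000 )
    ddim = DDIMScheduler(num_train_timesteps=1_000 )
    sample, noise = torch.randn(1 , 3 , 8 , 8 ), torch.randn(1 , 3 , 8 , 8 )
    t = torch.tensor([10] )
    assert torch.allclose(ddpm.add_noise(sample , noise , t ) , ddim.add_noise(sample , noise , t ) )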
| 631
| 1
|
"""simple docstring"""
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class __A ( TokenizerTesterMixin ,unittest.TestCase ):
    '''simple docstring'''

    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp( self ) -> None:
        """simple docstring"""
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB ,eos_token='''<unk>''' ,bos_token='''<unk>''' ,pad_token='''<unk>''' )
        tokenizer.save_pretrained(self.tmpdirname )

    def get_input_output_texts( self ,tokenizer ) -> Tuple[str, str]:
        """simple docstring"""
        input_text = '''This is a test'''
        output_text = '''This is a test'''
        return input_text, output_text

    def test_convert_token_and_id( self ) -> None:
        """simple docstring"""
        token = '''<s>'''
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) ,token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) ,token )

    def test_get_vocab( self ) -> None:
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] ,'''<unk>''' )
        self.assertEqual(vocab_keys[1] ,'''<s>''' )
        self.assertEqual(vocab_keys[-1] ,'''j''' )
        self.assertEqual(len(vocab_keys ) ,2_000 )

    def test_vocab_size( self ) -> None:
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size ,2_000 )

    def test_full_tokenizer( self ) -> None:
        """simple docstring"""
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB )

        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) ,[465, 287, 265, 631, 842] )

        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        # fmt: off
        self.assertListEqual(
            tokens ,['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] ,)
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids ,[262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] ,)

        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        # fmt: off
        self.assertListEqual(
            back_tokens ,['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] )
        # fmt: on

    def test_fast_encode_decode( self ) -> None:
        """simple docstring"""
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB )
        texts = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts ,expected_ids_list ):
            self.assertListEqual(tokenizer.encode_fast(text ) ,expected_ids )

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts ,expected_ids_list ):
            self.assertEqual(tokenizer.decode_fast(token_ids ) ,text )
    @slow
    def test_tokenizer_integration( self ) -> None:
        """simple docstring"""
        sequences = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
lowercase__ : List[Any] = {'''input_ids''': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase__ ,model_name='''AI-Sweden/gpt-sw3-126m''' ,sequences=sequences ,)
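# --- Usage sketch (added): round-tripping text through the tokenizer. This assumes the
# public GPT-SW3 checkpoint can be downloaded, so it is left as a commented example:
#
#   tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   ids = tokenizer("Det är inget fel på Mr. Cool")["input_ids"]
#   assert tokenizer.decode(ids) == "Det är inget fel på Mr. Cool"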
| 560
|
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['''data_utils'''] = data_utils
sys.modules['''vocabulary'''] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path , transfo_xl_config_file , pytorch_dump_folder_path , transfo_xl_dataset_file ):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file , '''rb''' ) as fp:
            corpus = pickle.load(fp , encoding='''latin1''' )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
        print(f"""Save vocabulary to {pytorch_vocab_dump_path}""" )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict , pytorch_vocab_dump_path )

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('''vocab''' , None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
        print(f"""Save dataset to {pytorch_dataset_dump_path}""" )
        torch.save(corpus_dict_no_vocab , pytorch_dataset_dump_path )

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )
        print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(f"""Building PyTorch model from configuration: {config}""" )
        model = TransfoXLLMHeadModel(config )

        model = load_tf_weights_in_transfo_xl(model , config , tf_path )
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
        print(f"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}""" )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(f"""Save configuration file to {os.path.abspath(pytorch_config_dump_path )}""" )
        with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
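# --- Usage sketch (added): a typical invocation converting a preprocessed corpus pickle;
# the script filename and paths below are illustrative placeholders.
#
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pt \
#       --transfo_xl_dataset_file ./corpus-info.pkl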
| 560
| 1
|
'''simple docstring'''
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase_ ( DiffusionPipeline ):
    """simple docstring"""

    def __init__( self , unet , scheduler ) -> None:
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size = 1 , generator = None , num_inference_steps = 50 , output_type = "pil" , return_dict = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        image = image.to(self.device )

        # set step values
        self.scheduler.set_timesteps(num_inference_steps )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output , t , image ).prev_sample

        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
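# --- Usage sketch (added): wiring the pipeline above to a freshly initialised UNet and
# scheduler. Weights are random, so the output is noise, but the control flow runs.
if __name__ == "__main__":
    from diffusers import DDIMScheduler, UNet2DModel

    unet = UNet2DModel(sample_size=32 , in_channels=3 , out_channels=3 )
    scheduler = DDIMScheduler(num_train_timesteps=1_000 )
    pipe = UpperCAmelCase_(unet=unet , scheduler=scheduler )
    images = pipe(batch_size=1 , num_inference_steps=5 ).images
    print(len(images ) , images[0].size )  # 1 (32, 32)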
| 435
|
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
'''simple docstring'''
    n = 10
    features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
"answers": datasets.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}),
"id": datasets.Value("int64"),
})
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        } , features=features , )
    return dataset
@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory , dataset):
    '''simple docstring'''
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
FILE_CONTENT = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename , "w") as f:
        f.write(data)
    return filename
@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    '''simple docstring'''
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT , "utf-8")
    with bz2.open(path , "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def A__ ( A : Dict):
'''simple docstring'''
import gzip
UpperCamelCase : Dict = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
UpperCamelCase : List[str] = bytes(A , "utf-8")
with gzip.open(A , "wb") as f:
f.write(A)
return path
@pytest.fixture(scope="session")
def A__ ( A : str):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lza.frame
UpperCamelCase : str = tmp_path_factory.mktemp("data") / "file.txt.lz4"
UpperCamelCase : Dict = bytes(A , "utf-8")
with lza.frame.open(A , "wb") as f:
f.write(A)
return path
@pytest.fixture(scope="session")
def A__ ( A : Optional[int] , A : Tuple):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
UpperCamelCase : Any = tmp_path_factory.mktemp("data") / "file.txt.7z"
with pyazr.SevenZipFile(A , "w") as archive:
archive.write(A , arcname=os.path.basename(A))
return path
@pytest.fixture(scope="session")
def A__ ( A : List[str] , A : Dict):
'''simple docstring'''
import tarfile
UpperCamelCase : Optional[Any] = tmp_path_factory.mktemp("data") / "file.txt.tar"
with tarfile.TarFile(A , "w") as f:
f.add(A , arcname=os.path.basename(A))
return path
@pytest.fixture(scope="session")
def A__ ( A : Any):
'''simple docstring'''
import lzma
UpperCamelCase : int = tmp_path_factory.mktemp("data") / "file.txt.xz"
UpperCamelCase : Union[str, Any] = bytes(A , "utf-8")
with lzma.open(A , "wb") as f:
f.write(A)
return path
@pytest.fixture(scope="session")
def A__ ( A : Tuple , A : List[Any]):
'''simple docstring'''
import zipfile
UpperCamelCase : Any = tmp_path_factory.mktemp("data") / "file.txt.zip"
with zipfile.ZipFile(A , "w") as f:
f.write(A , arcname=os.path.basename(A))
return path
@pytest.fixture(scope="session")
def A__ ( A : Optional[Any]):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCamelCase : List[str] = tmp_path_factory.mktemp("data") / "file.txt.zst"
UpperCamelCase : List[str] = bytes(A , "utf-8")
with zstd.open(A , "wb") as f:
f.write(A)
return path
@pytest.fixture(scope="session")
def A__ ( A : List[Any]):
'''simple docstring'''
UpperCamelCase : List[str] = tmp_path_factory.mktemp("data") / "file.xml"
UpperCamelCase : Any = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>")
with open(A , "w") as f:
f.write(A)
return filename
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="session")
def A__ ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session")
def A__ ( A : List[Any]):
'''simple docstring'''
UpperCamelCase : Dict = datasets.Dataset.from_dict(A)
UpperCamelCase : Tuple = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
dataset.map(cache_file_name=A)
return path
@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path , "w" , newline="") as f:
        writer = csv.DictWriter(f , fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="session")
def A__ ( A : Optional[int]):
'''simple docstring'''
UpperCamelCase : Optional[Any] = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
with open(A , "w" , newline="") as f:
UpperCamelCase : Dict = csv.DictWriter(A , fieldnames=["col_1", "col_2", "col_3"])
writer.writeheader()
for item in DATA:
writer.writerow(A)
return path
@pytest.fixture(scope="session")
def A__ ( A : Dict , A : Any):
'''simple docstring'''
import bza
UpperCamelCase : Any = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
with open(A , "rb") as f:
UpperCamelCase : List[str] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(A , "wb") as f:
f.write(A)
return path
@pytest.fixture(scope="session")
def A__ ( A : Union[str, Any] , A : int , A : List[Any]):
'''simple docstring'''
UpperCamelCase : Any = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
with zipfile.ZipFile(A , "w") as f:
f.write(A , arcname=os.path.basename(A))
f.write(A , arcname=os.path.basename(A))
return path
@pytest.fixture(scope="session")
def A__ ( A : Optional[int] , A : Optional[Any] , A : Tuple):
'''simple docstring'''
UpperCamelCase : Tuple = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
with zipfile.ZipFile(A , "w") as f:
f.write(A , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV")))
f.write(A , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV")))
return path
@pytest.fixture(scope="session")
def A__ ( A : str , A : int , A : int):
'''simple docstring'''
UpperCamelCase : int = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(A , "w") as f:
f.write(A , arcname=os.path.join("main_dir" , os.path.basename(A)))
f.write(A , arcname=os.path.join("main_dir" , os.path.basename(A)))
return path
@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        })
    with open(path , "wb") as f:
        writer = pq.ParquetWriter(f , schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]} , schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope="session")
def A__ ( A : List[str]):
'''simple docstring'''
UpperCamelCase : Dict = str(tmp_path_factory.mktemp("data") / "dataset.json")
UpperCamelCase : Any = {"data": DATA}
with open(A , "w") as f:
json.dump(A , A)
return path
@pytest.fixture(scope="session")
def A__ ( A : Any):
'''simple docstring'''
UpperCamelCase : Optional[Any] = str(tmp_path_factory.mktemp("data") / "dataset.json")
UpperCamelCase : int = {"data": DATA_DICT_OF_LISTS}
with open(A , "w") as f:
json.dump(A , A)
return path
@pytest.fixture(scope="session")
def A__ ( A : Union[str, Any]):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
with open(A , "w") as f:
for item in DATA:
f.write(json.dumps(A) + "\n")
return path
@pytest.fixture(scope="session")
def A__ ( A : Optional[int]):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
with open(A , "w") as f:
for item in DATA:
f.write(json.dumps(A) + "\n")
return path
@pytest.fixture(scope="session")
def A__ ( A : Union[str, Any]):
'''simple docstring'''
UpperCamelCase : Dict = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
with open(A , "w") as f:
for item in DATA_312:
f.write(json.dumps(A) + "\n")
return path
@pytest.fixture(scope="session")
def A__ ( A : Optional[int]):
'''simple docstring'''
UpperCamelCase : str = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
with open(A , "w") as f:
for item in DATA_STR:
f.write(json.dumps(A) + "\n")
return path
@pytest.fixture(scope="session")
def A__ ( A : Optional[Any] , A : Optional[Any]):
'''simple docstring'''
import gzip
UpperCamelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
with open(A , "rb") as orig_file:
with gzip.open(A , "wb") as zipped_file:
zipped_file.writelines(A)
return path
@pytest.fixture(scope="session")
def A__ ( A : Tuple , A : Optional[int]):
'''simple docstring'''
import gzip
UpperCamelCase : Optional[int] = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
with open(A , "rb") as orig_file:
with gzip.open(A , "wb") as zipped_file:
zipped_file.writelines(A)
return path
@pytest.fixture(scope="session")
def A__ ( A : Optional[int] , A : List[str] , A : int):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
with zipfile.ZipFile(A , "w") as f:
f.write(A , arcname=os.path.basename(A))
f.write(A , arcname=os.path.basename(A))
return path
@pytest.fixture(scope="session")
def A__ ( A : Optional[Any] , A : Tuple , A : List[str] , A : List[Any]):
'''simple docstring'''
UpperCamelCase : Tuple = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(A , "w") as f:
f.write(A , arcname=os.path.join("nested" , os.path.basename(A)))
return path
@pytest.fixture(scope="session")
def A__ ( A : List[str] , A : Any , A : Dict):
'''simple docstring'''
UpperCamelCase : Any = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(A , "w") as f:
f.write(A , arcname=os.path.join("main_dir" , os.path.basename(A)))
f.write(A , arcname=os.path.join("main_dir" , os.path.basename(A)))
return path
@pytest.fixture(scope="session")
def A__ ( A : Optional[int] , A : str , A : str):
'''simple docstring'''
UpperCamelCase : Any = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
with tarfile.TarFile(A , "w") as f:
f.add(A , arcname=os.path.basename(A))
f.add(A , arcname=os.path.basename(A))
return path
@pytest.fixture(scope="session")
def A__ ( A : List[str] , A : int , A : Dict , A : Any):
'''simple docstring'''
UpperCamelCase : List[str] = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
with tarfile.TarFile(A , "w") as f:
f.add(A , arcname=os.path.join("nested" , os.path.basename(A)))
return path
@pytest.fixture(scope="session")
def A__ ( A : Optional[int]):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = ["0", "1", "2", "3"]
UpperCamelCase : Tuple = str(tmp_path_factory.mktemp("data") / "dataset.txt")
with open(A , "w") as f:
for item in data:
f.write(item + "\n")
return path
@pytest.fixture(scope="session")
def A__ ( A : str):
'''simple docstring'''
UpperCamelCase : Optional[int] = ["0", "1", "2", "3"]
UpperCamelCase : List[str] = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
with open(A , "w") as f:
for item in data:
f.write(item + "\n")
return path
@pytest.fixture(scope="session")
def A__ ( A : Dict):
'''simple docstring'''
UpperCamelCase : List[Any] = ["0", "1", "2", "3"]
UpperCamelCase : Any = tmp_path_factory.mktemp("data") / "dataset.abc"
with open(A , "w") as f:
for item in data:
f.write(item + "\n")
return path
@pytest.fixture(scope="session")
def A__ ( A : Tuple , A : Optional[Any] , A : Any):
'''simple docstring'''
UpperCamelCase : List[Any] = tmp_path_factory.mktemp("data") / "dataset.text.zip"
with zipfile.ZipFile(A , "w") as f:
f.write(A , arcname=os.path.basename(A))
f.write(A , arcname=os.path.basename(A))
return path
@pytest.fixture(scope="session")
def A__ ( A : Dict , A : Tuple , A : str):
'''simple docstring'''
UpperCamelCase : Optional[Any] = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
with zipfile.ZipFile(A , "w") as f:
f.write(A , arcname=os.path.join("main_dir" , os.path.basename(A)))
f.write(A , arcname=os.path.join("main_dir" , os.path.basename(A)))
return path
@pytest.fixture(scope="session")
def A__ ( A : Dict , A : Any , A : Optional[Any]):
'''simple docstring'''
UpperCamelCase : Dict = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
with zipfile.ZipFile(A , "w") as f:
f.write(A , arcname=os.path.basename("unsupported.ext"))
f.write(A , arcname=os.path.basename("unsupported_2.ext"))
return path
@pytest.fixture(scope="session")
def A__ ( A : str):
'''simple docstring'''
UpperCamelCase : List[Any] = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
UpperCamelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
with open(A , "w" , encoding="utf-8") as f:
f.write(A)
return path
@pytest.fixture(scope="session")
def A__ ( ):
'''simple docstring'''
return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg")
@pytest.fixture(scope="session")
def A__ ( ):
'''simple docstring'''
return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav")
@pytest.fixture(scope="session")
def A__ ( A : List[str] , A : Optional[Any]):
'''simple docstring'''
UpperCamelCase : List[str] = tmp_path_factory.mktemp("data") / "dataset.img.zip"
with zipfile.ZipFile(A , "w") as f:
f.write(A , arcname=os.path.basename(A))
f.write(A , arcname=os.path.basename(A).replace(".jpg" , "2.jpg"))
return path
@pytest.fixture(scope="session")
def A__ ( A : Dict):
'''simple docstring'''
UpperCamelCase : Optional[int] = tmp_path_factory.mktemp("data_dir")
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" , "w") as f:
f.write("foo\n" * 10)
with open(data_dir / "subdir" / "test.txt" , "w") as f:
f.write("bar\n" * 10)
# hidden file
with open(data_dir / "subdir" / ".test.txt" , "w") as f:
f.write("bar\n" * 10)
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" , "w") as f:
f.write("foo\n" * 10)
with open(data_dir / ".subdir" / "test.txt" , "w") as f:
f.write("bar\n" * 10)
return data_dir
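# --- Usage sketch (added): how a test module consumes these session-scoped fixtures;
# pytest injects each one by argument name. The fixture name and loader call below follow
# the original `datasets` test suite and are assumptions in this copy, so it is left
# commented:
#
#   def test_csv_loading(csv_path):
#       ds = datasets.load_dataset("csv", data_files=csv_path, split="train")
#       assert ds.num_rows == 4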
| 435
| 1
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : torch.FloatTensor
class VQModel(ModelMixin, ConfigMixin):
    """simple docstring"""

    @register_to_config
    def __init__( self , in_channels: int = 3 , out_channels: int = 3 , down_block_types: Tuple[str] = ("DownEncoderBlock2D",) , up_block_types: Tuple[str] = ("UpDecoderBlock2D",) , block_out_channels: Tuple[int] = (64,) , layers_per_block: int = 1 , act_fn: str = "silu" , latent_channels: int = 3 , sample_size: int = 32 , num_vq_embeddings: int = 256 , norm_num_groups: int = 32 , vq_embed_dim: Optional[int] = None , scaling_factor: float = 0.18215 , norm_type: str = "group" , ):
        '''simple docstring'''
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )

    @apply_forward_hook
    def encode( self , x: torch.FloatTensor , return_dict: bool = True ):
        '''simple docstring'''
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )

    @apply_forward_hook
    def decode( self , h: torch.FloatTensor , force_not_quantize: bool = False , return_dict: bool = True ):
        '''simple docstring'''
        # also go through the quantization layer unless explicitly disabled
        if not force_not_quantize:
            quant , _ , _ = self.quantize(h )
        else:
            quant = h
        quant_conv_out = self.post_quant_conv(quant )
        dec = self.decoder(quant_conv_out , quant if self.config.norm_type == 'spatial' else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )

    def forward( self , sample: torch.FloatTensor , return_dict: bool = True ):
        '''simple docstring'''
        x = sample
        h = self.encode(x ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
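
# Hedged usage sketch (not part of the original module): round-trip a random
# image batch through the VQ autoencoder above. Shapes assume the default
# config (3 latent channels, a single 64-channel block, no downsampling);
# this is illustrative, not a test from the library.
def _vq_roundtrip_demo():
    model = VQModel()
    sample = torch.randn(1, 3, 32, 32)
    latents = model.encode(sample).latents  # continuous pre-quantization latents
    recon = model.decode(latents).sample    # quantize, then decode
    assert recon.shape == sample.shape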
| 304
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''allegro/herbert-base-cased''': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sep_token="</s>" , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , sep_token=sep_token , **kwargs , )

    def build_inputs_with_special_tokens( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        '''simple docstring'''
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep

    def get_special_tokens_mask( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
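
# Hedged usage sketch (illustrative, not part of the original file): encoding a
# sentence pair shows the [CLS] ... [SEP] ... [SEP] layout produced by
# build_inputs_with_special_tokens above.
#     tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#     enc = tokenizer("Kto ma lepszą sztukę,", "ma lepszy rząd")
#     # enc["input_ids"] starts with the CLS id and carries two SEP ids;
#     # enc["token_type_ids"] is 0 over the first segment and 1 over the second.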
| 304
| 1
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets." )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
                        "is an empty dataset dictionary." )
                raise ValueError(
                    f'''Dataset at position {i} has at least one split: {list(dataset )}\n'''
                    f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(dataset ) )}\']''' )
            raise ValueError(
                f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.''' )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f'''{stopping_strategy} is not supported. Please enter a valid stopping_strategy.''' )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets." )
    for i, dataset in enumerate(dsets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
                        "is an empty dataset dictionary." )
                raise ValueError(
                    f'''Dataset at position {i} has at least one split: {list(dataset )}\n'''
                    f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(dataset ) )}\']''' )
            raise ValueError(
                f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.''' )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets , info=info , split=split , axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets , info=info , split=split , axis=axis )
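
# Hedged usage sketch (not part of the original module): mixing two in-memory
# datasets. Assumes the entry points above are re-exported publicly as
# `datasets.interleave_datasets` / `datasets.concatenate_datasets`.
#     from datasets import Dataset, interleave_datasets
#     d1 = Dataset.from_dict({"x": [0, 1, 2]})
#     d2 = Dataset.from_dict({"x": [10, 11, 12]})
#     mixed = interleave_datasets([d1, d2], probabilities=[0.8, 0.2], seed=42)
#     # With the default stopping_strategy="first_exhausted", iteration ends as
#     # soon as either source runs out of samples.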
| 717
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    mam_aaa = torch.load(checkpoint_path , map_location="cpu" )
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config )
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
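
# Hedged usage sketch (hypothetical paths, not from the original script):
#     python convert_checkpoint.py /path/to/fairseq/model.pt /path/to/hf_dump
# The first positional argument is the fairseq checkpoint; the second is the
# directory that save_pretrained() populates with config and weight files.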
| 289
| 0
|
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    '''simple docstring'''
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase ) )
    return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
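
# Hedged examples (illustrative values, not doctests from the original file):
#     capitalize("hello world")  -> "Hello world"
#     capitalize("123 hello")    -> "123 hello"  (non-letters pass through)
#     capitalize("")             -> ""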
| 618
|
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    '''simple docstring'''
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]

    # argmax for given final observation
    arg_max = ''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    '''simple docstring'''
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    '''simple docstring'''
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('There\'s an empty parameter' )


def _validate_lists(observations_space: Any , states_space: Any) -> None:
    '''simple docstring'''
    _validate_list(observations_space , 'observations_space' )
    _validate_list(states_space , 'states_space' )


def _validate_list(_object: Any , var_name: str) -> None:
    '''simple docstring'''
    if not isinstance(_object , list ):
        msg = F'''{var_name} must be a list'''
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x , str ):
                msg = F'''{var_name} must be a list of strings'''
                raise ValueError(msg )


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    '''simple docstring'''
    _validate_dict(initial_probabilities , 'initial_probabilities' , float )
    _validate_nested_dict(transition_probabilities , 'transition_probabilities' )
    _validate_nested_dict(emission_probabilities , 'emission_probabilities' )


def _validate_nested_dict(_object: Any , var_name: str) -> None:
    '''simple docstring'''
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )


def _validate_dict(_object: Any , var_name: str , value_type: type , nested: bool = False ) -> None:
    '''simple docstring'''
    if not isinstance(_object , dict ):
        msg = F'''{var_name} must be a dict'''
        raise ValueError(msg )
    if not all(isinstance(x , str ) for x in _object ):
        msg = F'''{var_name} all keys must be strings'''
        raise ValueError(msg )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text = 'nested dictionary ' if nested else ''
        msg = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
        raise ValueError(msg )
if __name__ == "__main__":
from doctest import testmod
testmod()
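
# Hedged usage sketch (the classic "Healthy/Fever" HMM from the Viterbi
# literature; values are illustrative, not part of the original module):
def _viterbi_demo() -> None:
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    # Expected most-likely hidden sequence: ['Healthy', 'Healthy', 'Fever']
    print(viterbi(observations, states, start_p, trans_p, emit_p))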
| 618
| 1
|
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class _lowercase :
def __init__( self , _UpperCAmelCase , ):
A : int = parent
A : int = 13
A : int = 7
A : List[Any] = 30
A : Any = self.seq_length + self.mem_len
A : List[Any] = 15
A : Union[str, Any] = True
A : List[Any] = True
A : Optional[Any] = 99
A : List[str] = [10, 50, 80]
A : Dict = 32
A : Optional[int] = 32
A : List[Any] = 4
A : Dict = 8
A : str = 128
A : str = 2
A : int = 2
A : str = None
A : Dict = 1
A : Optional[Any] = 0
A : Any = 3
A : Any = self.vocab_size - 1
A : Optional[Any] = 0.01
def snake_case ( self ):
A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : str = None
if self.use_labels:
A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : Optional[int] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def snake_case ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
A : Tuple = TFTransfoXLModel(_UpperCAmelCase )
A, A : Any = model(_UpperCAmelCase ).to_tuple()
A : Optional[int] = {'''input_ids''': input_ids_a, '''mems''': mems_a}
A, A : List[str] = model(_UpperCAmelCase ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
A : Union[str, Any] = TFTransfoXLLMHeadModel(_UpperCAmelCase )
A, A : Union[str, Any] = model(_UpperCAmelCase ).to_tuple()
A : Union[str, Any] = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
A, A : Any = model(_UpperCAmelCase ).to_tuple()
A, A : Tuple = model([input_ids_a, mems_a] ).to_tuple()
A : List[str] = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
A, A : Dict = model(_UpperCAmelCase ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
A : Optional[int] = TFTransfoXLForSequenceClassification(_UpperCAmelCase )
A : List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_a, input_ids_b, lm_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids_a}
        return config, inputs_dict
@require_tf
class _lowercase ( snake_case_ , snake_case_ , unittest.TestCase ):
_UpperCamelCase = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase = () if is_tf_available() else ()
_UpperCamelCase = (
{
"""feature-extraction""": TFTransfoXLModel,
"""text-classification""": TFTransfoXLForSequenceClassification,
"""text-generation""": TFTransfoXLLMHeadModel,
"""zero-shot""": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def snake_case ( self ):
A : List[Any] = TFTransfoXLModelTester(self )
A : Optional[Any] = ConfigTester(self , config_class=_UpperCAmelCase , d_embed=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
self.model_tester.set_seed()
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*_UpperCAmelCase )
def snake_case ( self ):
self.model_tester.set_seed()
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*_UpperCAmelCase )
def snake_case ( self ):
A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*_UpperCAmelCase )
def snake_case ( self ):
A, A : int = self.model_tester.prepare_config_and_inputs_for_common()
A : Dict = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
A : Union[str, Any] = model_class(_UpperCAmelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
A : int = model.get_output_embeddings()
assert isinstance(_UpperCAmelCase , tf.keras.layers.Layer )
A : int = model.get_bias()
assert name is None
else:
A : Dict = model.get_output_embeddings()
assert x is None
A : Dict = model.get_bias()
assert name is None
def snake_case ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def snake_case ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Tuple = TFTransfoXLModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def snake_case ( self ):
pass
@require_tf
class _lowercase ( unittest.TestCase ):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def snake_case ( self ):
A : Dict = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
A : List[Any] = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
A : List[str] = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
A : Union[str, Any] = model.generate(_UpperCAmelCase , max_length=200 , do_sample=_UpperCAmelCase )
self.assertListEqual(output_ids[0].numpy().tolist() , _UpperCAmelCase )
| 701
|
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class _lowercase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def snake_case ( self ):
A : Union[str, Any] = '''[PAD]'''
A : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def snake_case ( self ):
A : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_UpperCAmelCase ) , 1_012 )
def snake_case ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )
def snake_case ( self ):
A : Dict = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
A : int = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
A : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
A : int = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
A : Union[str, Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def snake_case ( self ):
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def snake_case ( self ):
A : Union[str, Any] = '''Hello World!'''
A : Any = [35_389, 6_672, 49, 2]
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def snake_case ( self ):
# fmt: off
A : List[Any] = {'''input_ids''': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 537
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
    'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tapas'] = [
        'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TapasForMaskedLM',
        'TapasForQuestionAnswering',
        'TapasForSequenceClassification',
        'TapasModel',
        'TapasPreTrainedModel',
        'load_tf_weights_in_tapas',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_tapas'] = [
        'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFTapasForMaskedLM',
        'TFTapasForQuestionAnswering',
        'TFTapasForSequenceClassification',
        'TFTapasModel',
        'TFTapasPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
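
# Hedged sketch of what the lazy-import pattern above buys (illustrative, not
# part of the original file): importing the package stays cheap because the
# heavy torch/TF modules are loaded only on first attribute access.
#     import transformers.models.tapas as tapas  # fast: nothing heavy imported yet
#     model_cls = tapas.TapasModel               # triggers the real modeling_tapas import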
| 449
|
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
A = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
A = 'main'
# Default branch name
A = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
A = 'aaaaaaa'
# This commit does not exist, so we should 404.
A = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
A = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def context_en():
    """simple docstring"""
    print("Welcome!" )
    yield
    print("Bye!" )


@contextlib.contextmanager
def context_fr():
    """simple docstring"""
    print("Bonjour!" )
    yield
    print("Au revoir!" )
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers" ) is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
    def test_context_managers_no_context(self , mock_stdout):
        with ContextManagers([] ):
            print("Transformers are awesome!" )
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue() , "Transformers are awesome!\n" )

    @unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
    def test_context_managers_one_context(self , mock_stdout):
        with ContextManagers([context_en()] ):
            print("Transformers are awesome!" )
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() , "Welcome!\nTransformers are awesome!\nBye!\n" )

    @unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
    def test_context_managers_two_context(self , mock_stdout):
        with ContextManagers([context_fr(), context_en()] ):
            print("Transformers are awesome!" )
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() , "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n" )
    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification ) , ["labels"] )
        self.assertEqual(find_labels(BertForPreTraining ) , ["labels", "next_sentence_label"] )
        self.assertEqual(find_labels(BertForQuestionAnswering ) , ["start_positions", "end_positions"] )

        # find_labels should also follow user-defined subclasses
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel ) , ["labels"] )

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification ) , ["labels"] )
        self.assertEqual(find_labels(TFBertForPreTraining ) , ["labels", "next_sentence_label"] )
        self.assertEqual(find_labels(TFBertForQuestionAnswering ) , ["start_positions", "end_positions"] )

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel ) , ["labels"] )

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification ) , [] )
        self.assertEqual(find_labels(FlaxBertForPreTraining ) , [] )
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) , [] )

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel ) , [] )
| 449
| 1
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    '''simple docstring'''
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''OwlViTImageProcessor'''
    tokenizer_class = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
def __call__( self : List[Any] , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Any="max_length" , __lowerCAmelCase : Any="np" , **__lowerCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ) or (isinstance(__lowerCAmelCase , __lowerCAmelCase ) and not isinstance(text[0] , __lowerCAmelCase )):
A__ = [self.tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )]
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ) and isinstance(text[0] , __lowerCAmelCase ):
A__ = []
# Maximum number of queries across batch
A__ = max([len(__lowerCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__lowerCAmelCase ) != max_num_queries:
A__ = t + [""" """] * (max_num_queries - len(__lowerCAmelCase ))
A__ = self.tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
encodings.append(__lowerCAmelCase )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
A__ = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
A__ = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
A__ = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
A__ = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
A__ = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
A__ = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
A__ = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
A__ = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
A__ = BatchEncoding()
A__ = input_ids
A__ = attention_mask
if query_images is not None:
A__ = BatchEncoding()
A__ = self.image_processor(
__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ).pixel_values
A__ = query_pixel_values
if images is not None:
A__ = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None and images is not None:
A__ = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
A__ = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase )
    def post_process( self , *args , **kwargs ):
        """simple docstring"""
        return self.image_processor.post_process(*args , **kwargs )

    def post_process_object_detection( self , *args , **kwargs ):
        """simple docstring"""
        return self.image_processor.post_process_object_detection(*args , **kwargs )

    def post_process_image_guided_detection( self , *args , **kwargs ):
        """simple docstring"""
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )

    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor
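
# Hedged usage sketch (not part of the original file): preparing text queries
# and an image for OWL-ViT open-vocabulary detection. The checkpoint name and
# image URL are illustrative assumptions.
#     from PIL import Image
#     import requests
#     from transformers import OwlViTProcessor
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     image = Image.open(requests.get("https://images.example/cats.png", stream=True).raw)
#     inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#     # inputs holds padded "input_ids"/"attention_mask" plus "pixel_values"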
| 247
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    """simple docstring"""
    return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )


def test_gather(state):
    """simple docstring"""
    tensor = create_tensor(state )
    gathered_tensor = gather(tensor )
    assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )


def test_gather_object(state):
    """simple docstring"""
    obj = [state.process_index]
    gathered_obj = gather_object(obj )
    assert len(gathered_obj ) == state.num_processes, F'{gathered_obj}, {len(gathered_obj )} != {state.num_processes}'
    assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}'


def test_broadcast(state):
    """simple docstring"""
    tensor = create_tensor(state )
    broadcasted_tensor = broadcast(tensor )
    assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
    assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )


def test_pad_across_processes(state):
    """simple docstring"""
    # The main process pads with one extra element so padding across processes is exercised
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1 ).to(state.device )
    else:
        tensor = torch.arange(state.num_processes ).to(state.device )
    padded_tensor = pad_across_processes(tensor )
    assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]


def test_reduce_sum(state):
    """simple docstring"""
    # For now this test runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , """sum""" )
    truth_tensor = torch.tensor([4.0, 6] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), F'{reduced_tensor} != {truth_tensor}'


def test_reduce_mean(state):
    """simple docstring"""
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , """mean""" )
    truth_tensor = torch.tensor([2.0, 3] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), F'{reduced_tensor} != {truth_tensor}'


def _mp_fn(index):
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()


def main():
    """simple docstring"""
    state = PartialState()
    state.print(F'State: {state}' )
    state.print("""testing gather""" )
    test_gather(state )
    state.print("""testing gather_object""" )
    test_gather_object(state )
    state.print("""testing broadcast""" )
    test_broadcast(state )
    state.print("""testing pad_across_processes""" )
    test_pad_across_processes(state )
    state.print("""testing reduce_sum""" )
    test_reduce_sum(state )
    state.print("""testing reduce_mean""" )
    test_reduce_mean(state )
if __name__ == "__main__":
main()
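
# Hedged usage sketch (hypothetical file name, not from the original script):
# these checks are meant to run under several processes, e.g.
#     accelerate launch --num_processes 2 test_ops.py
# A single-process `python test_ops.py` also works; gather/broadcast then
# degenerate to identity operations.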
| 247
| 1
|
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    """simple docstring"""

    model_input_names = ['''input_values''', '''padding_mask''']
    def __init__( self , feature_size: int = 1 , sampling_rate: int = 24_000 , padding_value: float = 0.0 , chunk_length_s: float = None , overlap: float = None , **kwargs , ) -> Any:
        """simple docstring"""
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
    # This is a property because you might want to change chunk_length_s on the fly
    @property
    def chunk_length( self ) -> Optional[int]:
        """simple docstring"""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )

    # This is a property because you might want to change chunk_length_s on the fly
    @property
    def chunk_stride( self ) -> Optional[int]:
        """simple docstring"""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    def __call__( self , raw_audio , padding = None , truncation = False , max_length = None , return_tensors = None , sampling_rate = None , ) -> BatchFeature:
        """simple docstring"""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
                    F''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )

        if padding and truncation:
            raise ValueError('''Both padding and truncation were set. Make sure you only set one.''' )
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )

        if is_batched:
            raw_audio = [np.asarray(audio , dtype=np.float32 ).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio , np.ndarray ):
            raw_audio = np.asarray(raw_audio , dtype=np.float32 )
        elif isinstance(raw_audio , np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
            raw_audio = raw_audio.astype(np.float32 )

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio ).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio ):
            if example.ndim > 2:
                raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' )
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' )
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' )

        padded_inputs = None
        input_values = BatchFeature({'''input_values''': raw_audio} )
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio )
                nb_step = int(np.floor(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio )
                nb_step = int(np.ceil(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = '''max_length'''
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values , max_length=max_length , truncation=truncation , padding=padding , return_attention_mask=padding , )
            if padding:
                padded_inputs['''padding_mask'''] = padded_inputs.pop('''attention_mask''' )

        input_values = []
        for example in padded_inputs.pop('''input_values''' ):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T )

        padded_inputs['''input_values'''] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )

        return padded_inputs
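
# Hedged usage sketch (not part of the original file): extracting model-ready
# features from one second of synthetic mono audio. The checkpoint name is an
# illustrative assumption.
#     import numpy as np
#     from transformers import EncodecFeatureExtractor
#     fe = EncodecFeatureExtractor.from_pretrained("facebook/encodec_24khz")
#     audio = np.zeros(24_000, dtype=np.float32)  # 1 s at 24 kHz
#     batch = fe(raw_audio=audio, sampling_rate=24_000, return_tensors="pt")
#     # batch["input_values"] has shape (1, 1, time) - (batch, channels, samples)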
| 285
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 285
| 1
|
'''simple docstring'''
def mf_knapsack(i , wt , val , j):
    '''simple docstring'''
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1 , wt , val , j)
        else:
            val = max(
                mf_knapsack(i - 1 , wt , val , j) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1]) + val[i - 1] , )
        f[i][j] = val
    return f[i][j]
def knapsack(w , wt , val , n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1 , n + 1):
        for w_ in range(1 , w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution(w , wt , val):
    if not (isinstance(wt , (list, tuple)) and isinstance(val , (list, tuple))):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples')

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            f'But got {num_items} weights and {len(val)} values'
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i] , int):
            msg = (
                'All weights must be integers but got weight of '
                f'type {type(wt[i])} at index {i}'
            )
            raise TypeError(msg)

    optimal_val , dp_table = knapsack(w , wt , val , num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp , wt , i , j , optimal_set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set)
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution , _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution , optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print('optimal_value = ', optimal_solution)
    print('An optimal subset corresponding to the optimal value', optimal_subset)
| 6
|
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all n-grams (contiguous substrings of length ngram_size) of a sentence."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
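# Illustrative examples (derived directly from the sliding-window logic above):
# create_ngram("abcd", 2) -> ["ab", "bc", "cd"], and create_ngram("abcd", 5) -> [].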
if __name__ == "__main__":
from doctest import testmod
testmod()
| 6
| 1
|
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
A_ = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
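# A minimal usage sketch (not part of this module). The checkpoint name below is
# the public RAG checkpoint on the Hub; swap in your own model as needed:
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")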
| 42
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
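# Note on the import pattern above: when torch/transformers are unavailable, the
# dummy objects imported in the except branch stand in for the real classes and
# raise an informative ImportError only when they are actually instantiated.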
| 5
| 0
|
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)
            return batch
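        # The -100 sentinel above is the default ignore_index of PyTorch's
        # cross-entropy loss, so padded label positions do not contribute to the loss.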
        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2,
        )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer,
        )
        # start training
        trainer.train()
| 713
|
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """
    Removes segments. Positive values shave the first segments, negative shave the last segments.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
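# Illustrative examples (derived from the splitting logic above):
# shave_segments("input_blocks.3.0.op.weight", 1) -> "3.0.op.weight"
# shave_segments("input_blocks.3.0.op.weight", -1) -> "input_blocks.3.0.op"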
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside resnets to the new naming scheme (local renaming).
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside attentions to the new naming scheme (local renaming).
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    """
    Takes locally converted weights and applies a global renaming to them,
    splitting attention layers and applying any additional replacements.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
A_ : Optional[int] = old_checkpoint[path]
A_ : Union[str, Any] = old_tensor.shape[0] // 3
A_ : Union[str, Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
A_ : Any = old_tensor.shape[0] // config["""num_head_channels"""] // 3
A_ : Tuple = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
A_ , A_ , A_ : Tuple = old_tensor.split(channels // num_heads , dim=1 )
A_ : List[str] = query.reshape(a_ )
A_ : Union[str, Any] = key.reshape(a_ )
A_ : Optional[int] = value.reshape(a_ )
for path in paths:
A_ : Optional[int] = path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
A_ : Union[str, Any] = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
A_ : Any = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
A_ : Tuple = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
A_ : Union[str, Any] = new_path.replace(replacement["""old"""] , replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
A_ : Tuple = old_checkpoint[path["""old"""]][:, :, 0]
else:
A_ : Optional[int] = old_checkpoint[path["""old"""]]
def convert_ldm_checkpoint(checkpoint, config):
    """
    Takes a state dict and a config, and returns a converted checkpoint.
    """
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }
for i in range(1 , a_ ):
A_ : int = (i - 1) // (config["""num_res_blocks"""] + 1)
A_ : Optional[int] = (i - 1) % (config["""num_res_blocks"""] + 1)
A_ : Dict = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
A_ : List[str] = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
A_ : List[Any] = checkpoint[
F"input_blocks.{i}.0.op.weight"
]
A_ : Optional[Any] = checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
A_ : Optional[Any] = renew_resnet_paths(a_ )
A_ : Dict = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
A_ : str = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
a_ , a_ , a_ , additional_replacements=[meta_path, resnet_op] , config=a_ )
if len(a_ ):
A_ : Any = renew_attention_paths(a_ )
A_ : Any = {
"""old""": F"input_blocks.{i}.1",
"""new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
A_ : List[Any] = {
F"input_blocks.{i}.1.qkv.bias": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
a_ , a_ , a_ , additional_replacements=[meta_path] , attention_paths_to_split=a_ , config=a_ , )
A_ : Tuple = middle_blocks[0]
A_ : Optional[int] = middle_blocks[1]
A_ : int = middle_blocks[2]
A_ : int = renew_resnet_paths(a_ )
assign_to_checkpoint(a_ , a_ , a_ , config=a_ )
A_ : Tuple = renew_resnet_paths(a_ )
assign_to_checkpoint(a_ , a_ , a_ , config=a_ )
A_ : Optional[int] = renew_attention_paths(a_ )
A_ : Optional[int] = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
a_ , a_ , a_ , attention_paths_to_split=a_ , config=a_ )
for i in range(a_ ):
A_ : Union[str, Any] = i // (config["""num_res_blocks"""] + 1)
A_ : Union[str, Any] = i % (config["""num_res_blocks"""] + 1)
A_ : List[str] = [shave_segments(a_ , 2 ) for name in output_blocks[i]]
A_ : Union[str, Any] = {}
for layer in output_block_layers:
A_ , A_ : List[str] = layer.split(""".""" )[0], shave_segments(a_ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(a_ )
else:
A_ : Optional[int] = [layer_name]
if len(a_ ) > 1:
A_ : List[str] = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
A_ : List[Any] = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
A_ : str = renew_resnet_paths(a_ )
A_ : Dict = renew_resnet_paths(a_ )
A_ : Tuple = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(a_ , a_ , a_ , additional_replacements=[meta_path] , config=a_ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
A_ : List[Any] = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
A_ : Optional[Any] = checkpoint[
F"output_blocks.{i}.{index}.conv.weight"
]
A_ : Any = checkpoint[
F"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(a_ ) == 2:
A_ : int = []
if len(a_ ):
A_ : Union[str, Any] = renew_attention_paths(a_ )
A_ : Optional[int] = {
"""old""": F"output_blocks.{i}.1",
"""new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
A_ : str = {
F"output_blocks.{i}.1.qkv.bias": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"output_blocks.{i}.1.qkv.weight": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
a_ , a_ , a_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=a_ , )
else:
A_ : List[str] = renew_resnet_paths(a_ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
A_ : List[str] = """.""".join(["""output_blocks""", str(a_ ), path["""old"""]] )
A_ : int = """.""".join(["""up_blocks""", str(a_ ), """resnets""", str(a_ ), path["""new"""]] )
A_ : Tuple = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
    if "ldm" in config:
        del config["ldm"]
    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)
    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
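# Example invocation of this conversion script (all paths below are placeholders):
#   python <this_script>.py --checkpoint_path ./model.ckpt --config_file ./config.json --dump_path ./converted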
| 385
| 0
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
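    # Worked example of the size formula above with the tester defaults (image_size=64):
    # stage 0: floor((64 + 2*2 - 7) / 4 + 1) = 16
    # stage 1: floor((16 + 2*1 - 3) / 2 + 1) = 8
    # stage 2: floor((8 + 2*1 - 3) / 2 + 1) = 4
    # so the final feature map is 4 x 4 with embed_dim[-1] = 96 channels.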
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
def UpperCamelCase ( self : Tuple ):
'''simple docstring'''
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions" )
def UpperCamelCase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def UpperCamelCase ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
def UpperCamelCase ( self : Tuple ):
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def UpperCamelCase ( self : Dict ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" )
def UpperCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = tf.keras.mixed_precision.Policy("mixed_float16" )
tf.keras.mixed_precision.set_global_policy(snake_case__ )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32" )
def UpperCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[Any] = model_class(snake_case__ )
UpperCAmelCase__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Optional[int] = [*signature.parameters.keys()]
UpperCAmelCase__ : str = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def UpperCamelCase ( self : int ):
'''simple docstring'''
def check_hidden_states_output(snake_case__ : str , snake_case__ : Tuple , snake_case__ : Dict ):
UpperCAmelCase__ : Optional[int] = model_class(snake_case__ )
UpperCAmelCase__ : Dict = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
UpperCAmelCase__ : List[str] = outputs.hidden_states
UpperCAmelCase__ : Optional[Any] = len(self.model_tester.depth )
self.assertEqual(len(snake_case__ ) , snake_case__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : Optional[Any] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 199
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """
    Return set of symbol pairs in a word.
    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
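# Illustrative example (derived from the loop above):
# get_pairs("hello") -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}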
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """
    Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding).
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")
        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
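# A minimal usage sketch (not part of this module); the checkpoint name matches
# the pretrained map defined above:
#
#     tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#     ids = tokenizer("sample sentence")["input_ids"]
#     text = tokenizer.decode(ids)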
| 108
| 0
|
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """
    Utility class for storing learned text embeddings for classifier-free sampling.
    """

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()
        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)
        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)
                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
        return prompt_embeds
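    # Ordering note: the unconditional embeddings are concatenated *before* the
    # text embeddings, which matches the `model_output.chunk(2)` unpacking order
    # (uncond first, then text) in `__call__` below.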
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        sample = latents
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample
            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)
            model_output = self.truncate(model_output, truncation_rate)
            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)
            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)
        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]
        keep_mask = keep_mask.gather(1, indices.argsort(1))
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)
        return rv
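    # Mini example of the masking above: for sorted probabilities
    # [0.6, 0.3, 0.1] and truncation_rate 0.9, the shifted cumulative-sum mask
    # keeps the first two entries and sets the third to log(0).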
| 680
|
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
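# Sanity check on the constants above: flattening = (AXIS_A - AXIS_B) / AXIS_A
# evaluates to roughly 1 / 298.257, the standard WGS-84 flattening of the Earth.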
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680
| 1
|
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
"""simple docstring"""
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )
        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight"
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight"
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
@staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b
@staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
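    # Illustrative shape note: for logprob of shape [batch, vocab] and target of
    # shape [batch], the gather above picks out logprob[b, target[b]] for every row b.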
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)
                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
        return out
| 663
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar the item is to the target by counting each char in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
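# Illustrative example (derived from the slicing above): with parents "abcd" and
# "wxyz" and random_slice == 2, the children are "abyz" and "wxcd".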
def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    """Select the second parent and generate new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until a population member matches the target."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 663
| 1
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
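# Illustrative invocation (the script name and paths below are placeholders):
#   python convert_gptsan_tf_checkpoint_to_pytorch.py --tf_model_dir ./gptsan_tf_ckpt --output ./gptsan.pt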
| 14
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 and 3x3 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating the cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 14
| 1
|
"""Convert a sharded fairseq NLLB-MoE checkpoint into the Transformers format."""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
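# Note: `make_linear_from_emb` builds a bias-free projection that shares the embedding
# weights (the usual weight-tying trick); it is not invoked in the conversion flow below.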
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
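# For example (hypothetical fairseq key, shown for illustration; `w` is a placeholder tensor):
#   rename_fairseq_keys({"layers.3.moe_layer.experts.0.fc1.weight": w}, expert_idx=7)
#   renames the key to "layers.3.ffn.experts.expert_7.fc1.weight".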
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts) + 1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts) + 1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx + 1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
| 26
|
"""Project Euler Problem 6: difference between the square of the sum and the
sum of the squares of the first n natural numbers."""


def solution(n: int = 100) -> int:
    """Uses the closed forms sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
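    # Brute-force cross-check (an illustrative addition, not in the original file):
    # for n = 10 the closed forms give 55**2 - 385 == 2640, matching direct summation.
    assert solution(10) == sum(range(1, 11)) ** 2 - sum(i * i for i in range(1, 11))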
| 26
| 1
|
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels,
        token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels,
        token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels,
        token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values, output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)

    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_rw_cache_conversion(self):
        # Round-trips the past-key-values cache through Falcon's RW-format conversion
        # helpers and checks that nothing is lost.
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )

    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format(self):
        # Falcon can have a different number of KV-heads than query heads, so this
        # overrides the common test to compute the right head counts.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        # The big models are way too big for the CI, so we use tiny random models
        # that resemble their architectures but with much smaller and fewer layers.
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        # Generation results should be identical with and without the KV cache.
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
| 715
|
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
| 106
| 0
|
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def get_module_path(test_file):
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def UpperCAmelCase ( snake_case : int ):
_lowerCAmelCase:List[Any] = []
_lowerCAmelCase:Optional[Any] = get_test_module(snake_case )
for attr in dir(snake_case ):
_lowerCAmelCase:List[Any] = getattr(snake_case , snake_case )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
_lowerCAmelCase:List[Any] = getattr(snake_case , '''all_model_classes''' , [] )
if len(snake_case ) > 0:
test_classes.append(snake_case )
# sort with class names
return sorted(snake_case , key=lambda snake_case : x.__name__ )
def UpperCAmelCase ( snake_case : Tuple ):
_lowerCAmelCase:str = get_test_classes(snake_case )
_lowerCAmelCase:int = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(snake_case , key=lambda snake_case : x.__name__ )
def UpperCAmelCase ( snake_case : Any ):
_lowerCAmelCase:Union[str, Any] = test_class()
if hasattr(snake_case , '''setUp''' ):
test.setUp()
_lowerCAmelCase:List[Any] = None
if hasattr(snake_case , '''model_tester''' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
_lowerCAmelCase:Union[str, Any] = test.model_tester.__class__
return model_tester
def UpperCAmelCase ( snake_case : List[str] , snake_case : List[str] ):
_lowerCAmelCase:List[Any] = get_test_classes(snake_case )
_lowerCAmelCase:Any = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(snake_case )
# sort with class names
return sorted(snake_case , key=lambda snake_case : x.__name__ )
def UpperCAmelCase ( snake_case : List[Any] , snake_case : Optional[Any] ):
_lowerCAmelCase:Optional[Any] = get_test_classes_for_model(snake_case , snake_case )
_lowerCAmelCase:Optional[int] = []
for test_class in test_classes:
_lowerCAmelCase:str = get_model_tester_from_test_class(snake_case )
if tester_class is not None:
tester_classes.append(snake_case )
# sort with class names
return sorted(snake_case , key=lambda snake_case : x.__name__ )
def UpperCAmelCase ( snake_case : List[Any] ):
_lowerCAmelCase:Union[str, Any] = get_test_classes(snake_case )
_lowerCAmelCase:Optional[int] = {test_class: get_model_tester_from_test_class(snake_case ) for test_class in test_classes}
return test_tester_mapping
def UpperCAmelCase ( snake_case : Any ):
_lowerCAmelCase:str = get_model_classes(snake_case )
_lowerCAmelCase:str = {
model_class: get_test_classes_for_model(snake_case , snake_case ) for model_class in model_classes
}
return model_test_mapping
def UpperCAmelCase ( snake_case : int ):
_lowerCAmelCase:List[Any] = get_model_classes(snake_case )
_lowerCAmelCase:Dict = {
model_class: get_tester_classes_for_model(snake_case , snake_case ) for model_class in model_classes
}
return model_to_tester_mapping
def UpperCAmelCase ( snake_case : Any ):
if isinstance(snake_case , snake_case ):
return o
elif isinstance(snake_case , snake_case ):
return o.__name__
elif isinstance(snake_case , (list, tuple) ):
return [to_json(snake_case ) for x in o]
elif isinstance(snake_case , snake_case ):
return {to_json(snake_case ): to_json(snake_case ) for k, v in o.items()}
else:
return o
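

# Hedged usage sketch: the test-file path below is only an example and assumes the
# transformers repository layout (run from the repo root so `sys.path.append(".")` works).
if __name__ == "__main__":
    mapping = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
    print(to_json(mapping))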
| 227
|
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
            `"rougeLsum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
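
# Hedged usage sketch mirroring the docstring example; `use_aggregator=False` is shown here
# as an assumption, to illustrate per-example scores instead of bootstrap aggregates:
#
#     rouge = datasets.load_metric("rouge")
#     results = rouge.compute(predictions=["hello there"], references=["hello there"], use_aggregator=False)
#     print(results["rougeL"])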
| 227
| 1
|
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model, ckpt_dir, model_name):
    """
    Args:
        model: BertModel Pytorch model instance to be converted
        ckpt_dir: Tensorflow model directory
        model_name: model name
    """

    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
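# Hypothetical invocation (the script name and all paths are placeholders):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoints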
| 703
|
"""simple docstring"""
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
graph_bwd = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
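
# Usage sketch on the demo graphs above: E -> G -> F costs 2 + 1 = 3, which beats
# E -> B -> C -> D -> F (cost 4), so the call below is expected to return 3.
#
#     print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3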
| 529
| 0
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our CONDITIONAL_DETR structure.
    """

    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
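# Hypothetical invocation (the script name and dump path are placeholders):
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50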
| 259
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
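

# The check is plain-assert based, so it can be run directly as well as through pytest
# (a small convenience hook added here, not part of the original test):
if __name__ == "__main__":
    test_prim_successful_result()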
| 686
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 1_024,
"facebook/mbart-large-cc25": 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" MBART tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
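

# Hedged usage sketch (requires network access to download the pretrained files; the
# checkpoint name comes from the maps above, everything else is illustrative):
#
#     tokenizer = MBartTokenizerFast.from_pretrained(
#         "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")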
| 555
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 555
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False


@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
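
    # Hedged usage sketch outside the test harness (a CUDA device and model download are assumed;
    # the prompt and token indices below are illustrative):
    #
    #     pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
    #     image = pipe(prompt="a cat and a frog", token_indices=[2, 5]).images[0]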
| 47
|
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 47
| 1
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 688
|
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")
    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
        help="""Whether to use relative position embeddings or not. Defaults to False.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
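# Hypothetical invocation (checkpoint and config paths are placeholders):
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --tf_checkpoint_path ./tapas_wtq/model.ckpt-0 \
#       --tapas_config_file ./tapas_wtq/bert_config.json \
#       --pytorch_dump_path ./tapas_wtq_pytorch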
| 688
| 1
|
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=False, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "\u3000", "。"],
        )
    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )
    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )
    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
| 370
|
"""Sieve of Eratosthenes."""
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Return all prime numbers up to and including num.

    >>> prime_sieve(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    # Everything past sqrt(num) still marked True is prime.
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
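# Usage sketch (derived from the function above): prime_sieve(10) returns [2, 3, 5, 7]
# and prime_sieve(2) returns [2]. The sieve runs in O(n log log n) time with O(n) extra
# memory for the boolean table.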
| 370
| 1
|
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm: gcd(a, b) == gcd(b, a mod b)."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
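# Sanity note for the two implementations above: both satisfy gcd(a, 0) == a and
# gcd(a, b) == gcd(b, a % b), so e.g. euclidean_gcd(3, 6) == euclidean_gcd_recursive(3, 6) == 3.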
| 714
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = mask_token

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
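# Usage sketch (model ID is illustrative): LED's `global_attention_mask` uses 1 for
# globally attending tokens and 0 for local attention, which is why `_pad` above fills
# with -1 rather than 0. A common setup puts global attention on the <s> token only:
#
#     tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#     enc = tokenizer("a long document ...", return_tensors="pt")
#     enc["global_attention_mask"] = [[1] + [0] * (enc["input_ids"].shape[1] - 1)]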
| 37
| 0
|
from manim import *
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[int]:
"""simple docstring"""
lowercase__ = Rectangle(height=0.5 , width=0.5 )
lowercase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowercase__ = [mem.copy() for i in range(6 )]
lowercase__ = [mem.copy() for i in range(6 )]
lowercase__ = VGroup(*a ).arrange(a , buff=0 )
lowercase__ = VGroup(*a ).arrange(a , buff=0 )
lowercase__ = VGroup(a , a ).arrange(a , buff=0 )
lowercase__ = Text('CPU' , font_size=24 )
lowercase__ = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a )
lowercase__ = [mem.copy() for i in range(4 )]
lowercase__ = VGroup(*a ).arrange(a , buff=0 )
lowercase__ = Text('GPU' , font_size=24 )
lowercase__ = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
gpu.move_to([-1, -1, 0] )
self.add(a )
lowercase__ = [mem.copy() for i in range(6 )]
lowercase__ = VGroup(*a ).arrange(a , buff=0 )
lowercase__ = Text('Model' , font_size=24 )
lowercase__ = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
model.move_to([3, -1.0, 0] )
self.add(a )
lowercase__ = []
for i, rect in enumerate(a ):
rect.set_stroke(a )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
lowercase__ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=a , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=a , buff=0.0 )
self.add(a )
cpu_targs.append(a )
lowercase__ = [mem.copy() for i in range(6 )]
lowercase__ = VGroup(*a ).arrange(a , buff=0 )
lowercase__ = Text('Loaded Checkpoint' , font_size=24 )
lowercase__ = Group(a , a ).arrange(a , aligned_edge=a , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
lowercase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase__ = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a , a )
lowercase__ = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
lowercase__ = MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(a ) , Write(a ) )
self.play(Write(a , run_time=1 ) , Create(a , run_time=1 ) )
lowercase__ = []
lowercase__ = []
for i, rect in enumerate(a ):
lowercase__ = fill.copy().set_fill(a , opacity=0.7 )
target.move_to(a )
first_animations.append(GrowFromCenter(a , run_time=1 ) )
lowercase__ = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(a , run_time=1.5 ) )
self.play(*a )
self.play(*a )
self.wait()
| 235
|
import requests
lowercase_ = """YOUR API KEY"""
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = giphy_api_key ) -> list:
lowercase__ = '+'.join(query.split() )
lowercase__ = F"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
lowercase__ = requests.get(_SCREAMING_SNAKE_CASE ).json()['data']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("""\n""".join(get_gifs("""space ship""")))
| 235
| 1
|
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
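# Pattern note: the DummyObject metaclass lets this placeholder class be imported even
# when note_seq is missing; any attempt to instantiate it or call its classmethods then
# fails through requires_backends with an install hint instead of a bare ImportError.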
| 429
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowerCAmelCase = logging.get_logger(__name__)
class _a ( UpperCamelCase__ ):
_lowercase : Optional[int] = ['''pixel_values''']
def __init__( self: Any , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Dict[str, int]] = None , UpperCamelCase_: PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_: bool = True , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: bool = True , UpperCamelCase_: Union[int, float] = 1 / 255 , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , **UpperCamelCase_: Optional[int] , ) -> None:
"""simple docstring"""
super().__init__(**UpperCamelCase_ )
lowercase__ = size if size is not None else {'''shortest_edge''': 256}
lowercase__ = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
lowercase__ = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase__ = get_size_dict(UpperCamelCase_ , param_name='''crop_size''' )
lowercase__ = do_resize
lowercase__ = size
lowercase__ = resample
lowercase__ = do_center_crop
lowercase__ = crop_size
lowercase__ = do_rescale
lowercase__ = rescale_factor
lowercase__ = do_normalize
lowercase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: np.ndarray , UpperCamelCase_: Dict[str, int] , UpperCamelCase_: PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
lowercase__ = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
lowercase__ = get_resize_output_image_size(UpperCamelCase_ , size=size['''shortest_edge'''] , default_to_square=UpperCamelCase_ )
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: int , UpperCamelCase_: np.ndarray , UpperCamelCase_: Dict[str, int] , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Optional[int] , ) -> np.ndarray:
"""simple docstring"""
lowercase__ = get_size_dict(UpperCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(UpperCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: np.ndarray , UpperCamelCase_: float , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Tuple ) -> np.ndarray:
"""simple docstring"""
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: np.ndarray , UpperCamelCase_: Union[float, List[float]] , UpperCamelCase_: Union[float, List[float]] , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Dict , ) -> np.ndarray:
"""simple docstring"""
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: ImageInput , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: PILImageResampling = None , UpperCamelCase_: bool = None , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[float] = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[str, TensorType]] = None , UpperCamelCase_: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase_: Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = do_resize if do_resize is not None else self.do_resize
lowercase__ = size if size is not None else self.size
lowercase__ = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
lowercase__ = resample if resample is not None else self.resample
lowercase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ = crop_size if crop_size is not None else self.crop_size
lowercase__ = get_size_dict(UpperCamelCase_ , param_name='''crop_size''' )
lowercase__ = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ = image_mean if image_mean is not None else self.image_mean
lowercase__ = image_std if image_std is not None else self.image_std
lowercase__ = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase__ = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_resize:
lowercase__ = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
if do_center_crop:
lowercase__ = [self.center_crop(image=UpperCamelCase_ , size=UpperCamelCase_ ) for image in images]
if do_rescale:
lowercase__ = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_normalize:
lowercase__ = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images]
lowercase__ = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
lowercase__ = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert the output of a semantic segmentation model into per-image label maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
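    # Usage sketch (names are illustrative): with `outputs.logits` of shape
    # (batch_size, num_labels, height, width),
    #     maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(512, 512)])
    # yields one (512, 512) integer label map per image, upsampled bilinearly before the argmax.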
| 429
| 1
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _lowerCAmelCase ( lowerCamelCase ):
@staticmethod
@abstractmethod
def _a ( a_ ) -> str:
raise NotImplementedError()
@abstractmethod
def _a ( self ) -> Tuple:
raise NotImplementedError()
| 657
|
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
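    # Invariant exercised above: `out_features` are stage names and `out_indices` index
    # into those same stages, so stage_names = ["a", "b", "c"] with out_indices = (0, 2)
    # must pair with out_features = ["a", "c"]; setting either property re-derives the other.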
| 657
| 1
|
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
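# Usage sketch: NezhaConfig() reproduces the defaults above (21128-token vocab, 12 layers);
# individual fields can be overridden by keyword, e.g. NezhaConfig(num_hidden_layers=6,
# use_cache=False), with everything else inherited from PretrainedConfig.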
| 705
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 381
| 0
|
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest"
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
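# Shape note for the NHWC modules above: FlaxUpsample2D maps (b, h, w, c) to
# (b, 2h, 2w, out_channels) via nearest-neighbour resize plus a 3x3 conv, while
# FlaxDownsample2D's stride-2 conv roughly halves the spatial dims; FlaxResnetBlock2D
# preserves h and w and only changes the channel count (through conv_shortcut if needed).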
| 466
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 466
| 1
|
"""simple docstring"""
from math import ceil
def lowercase (_snake_case ,_snake_case ) -> Any:
'''simple docstring'''
__UpperCamelCase = list(range(0 ,__snake_case ) )
__UpperCamelCase = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
__UpperCamelCase = []
for i in device_map_blocks:
if device_map_blocks.count(__snake_case ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(__snake_case )
# Missing blocks
__UpperCamelCase = [i for i in blocks if i not in device_map_blocks]
__UpperCamelCase = [i for i in device_map_blocks if i not in blocks]
if len(__snake_case ) != 0:
raise ValueError(
"Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
" These attention blocks were specified more than once: " + str(__snake_case ) )
if len(__snake_case ) != 0:
raise ValueError(
"There are attention blocks for this model that are not specified in the device_map. Add these attention "
"blocks to a device on the device_map: " + str(__snake_case ) )
if len(__snake_case ) != 0:
raise ValueError(
"The device_map contains more attention blocks than this model has. Remove these from the device_map:"
+ str(__snake_case ) )
def lowercase (_snake_case ,_snake_case ) -> str:
'''simple docstring'''
__UpperCamelCase = list(range(__snake_case ) )
__UpperCamelCase = int(ceil(n_layers / len(__snake_case ) ) )
__UpperCamelCase = [layers[i : i + n_blocks] for i in range(0 ,__snake_case ,__snake_case )]
return dict(zip(__snake_case ,__snake_case ) )
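# Usage sketch: spread 12 attention blocks across 4 devices, then validate the map.
#     device_map = get_device_map(12, devices=[0, 1, 2, 3])
#     # -> {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}
#     assert_device_map(device_map, num_blocks=12)  # raises ValueError on duplicates/gaps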
| 707
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 228
| 0
|
values = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: 'a',
11: 'b',
12: 'c',
13: 'd',
14: 'e',
15: 'f',
}
def decimal_to_hexadecimal(decimal):
    """
    Convert a whole decimal number into its hexadecimal string representation.

    >>> decimal_to_hexadecimal(5)
    '0x5'
    >>> decimal_to_hexadecimal(-256)
    '-0x100'
    """
    assert isinstance(decimal, (int, float)) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
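# Sanity note: for positive integers this agrees with the built-in, e.g.
# decimal_to_hexadecimal(255) == hex(255) == "0xff"; the one divergence is 0, where the
# loop above emits no digits and returns "0x" while hex(0) returns "0x0".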
| 321
|
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
SPIECE_UNDERLINE = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    """Construct a T5 tokenizer based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    @property
    def vocab_size( self ):
        return self.sp_model.get_piece_size() + self._extra_ids
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def get_sentinel_tokens( self ):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>" , token ) ) is not None , self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self ):
        return [self._convert_token_to_id(token ) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present( self , token_ids ):
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
                " eos tokens being added." )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0 )
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1 )
            return token_ids_0 + token_ids_1
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def tokenize( self , text , **kwargs ):
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE , " " )
        return super().tokenize(text , **kwargs )
    def _tokenize( self , text , **kwargs ):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE )
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text , out_type=str )
        if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(SPIECE_UNDERLINE ):
            tokens = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id( self , token ):
        if token.startswith("<extra_id_" ):
            match = re.match(r"<extra_id_(\d+)>" , token )
            num = int(match.group(1 ) )
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self , index ):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        else:
            token = f'''<extra_id_{self.vocab_size - 1 - index}>'''
        return token
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,)
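# A minimal usage sketch (illustrative only; assumes a SentencePiece vocabulary
# such as the one shipped with the "t5-small" checkpoint is available):
#
#   tokenizer = TaTokenizer.from_pretrained("t5-small")
#   ids = tokenizer("Translate English to German: Hello!").input_ids
#   tokenizer.convert_ids_to_tokens(ids)   # note the appended </s> eos token
#   tokenizer.get_sentinel_tokens()        # ['<extra_id_0>', ..., '<extra_id_99>']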
| 321
| 1
|
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__A = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample( wav: np.ndarray , max_length: float , sample_rate: int = 1_6000 ) -> np.ndarray:
    """simple docstring"""
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
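# Quick sanity sketch (numbers are illustrative): a 5 s mono clip at 16 kHz has
# 80_000 samples, so random_subsample(wav, max_length=2.0) returns a random
# contiguous 32_000-sample window, while clips shorter than max_length pass
# through unchanged.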
@dataclass
class DataTrainingArguments :
"""simple docstring"""
    dataset_name: Optional[str] = field(default=None , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    train_file: Optional[str] = field(
        default=None , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
    eval_file: Optional[str] = field(
        default=None , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
    train_split_name: str = field(
        default='''train''' , metadata={
            '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
        } , )
    eval_split_name: str = field(
        default='''validation''' , metadata={
            '''help''': (
                '''The name of the training data set split to use (via the datasets library). Defaults to \'validation\''''
            )
        } , )
    audio_column_name: str = field(
        default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
    label_column_name: str = field(
        default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    max_length_seconds: float = field(
        default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )
@dataclass
class ModelArguments :
"""simple docstring"""
    model_name_or_path: str = field(
        default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    feature_extractor_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Name or path of preprocessor config.'''} )
    freeze_feature_encoder: bool = field(
        default=True , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
    attention_mask: bool = field(
        default=True , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    freeze_feature_extractor: Optional[bool] = field(
        default=None , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
    def __post_init__( self ):
        '''simple docstring'''
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                'The argument `--freeze_feature_extractor` is deprecated and '
                'will be removed in a future version. Use `--freeze_feature_encoder`'
                'instead. Setting `freeze_feature_encoder==True`.' , FutureWarning , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'The argument `--freeze_feature_extractor` is deprecated and '
'should not be used in combination with `--freeze_feature_encoder`.'
'Only make use of `--freeze_feature_encoder`.' )
def main() -> None:
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_audio_classification' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                'Use --overwrite_output_dir to train from scratch.' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets['train'] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
    raw_datasets['eval'] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. """
'Make sure to set `--audio_column_name` to the correct audio column - one of '
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. """
'Make sure to set `--label_column_name` to the correct text column - one of '
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch ):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio['array'] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav )
        inputs = feature_extractor(subsampled_wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch['labels'] = list(batch[data_args.label_column_name] )
        return output_batch
    def val_transforms(batch ):
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch['labels'] = list(batch[data_args.label_column_name] )
        return output_batch
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load('accuracy' )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred ):
        predictions = np.argmax(eval_pred.predictions , axis=1 )
        return metric.compute(predictions=predictions , references=eval_pred.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task='audio-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets['train'] = (
                raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms , output_all_columns=False )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets['eval'] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms , output_all_columns=False )
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=raw_datasets['train'] if training_args.do_train else None , eval_dataset=raw_datasets['eval'] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=feature_extractor , )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics('train' , train_result.metrics )
        trainer.save_metrics('train' , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
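# Example invocation (all paths and hyper-parameters below are illustrative):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir ./wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval --max_length_seconds 1 \
#       --learning_rate 3e-5 --num_train_epochs 5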
| 707
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __lowerCAmelCase ( PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = ShapEImgaImgPipeline
    params = ['''image''']
    batch_params = ['''image''']
    required_optional_params = [
        '''num_images_per_prompt''',
        '''num_inference_steps''',
        '''generator''',
        '''latents''',
        '''guidance_scale''',
        '''frame_size''',
        '''output_type''',
        '''return_dict''',
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size( self ):
        '''simple docstring'''
        return 32
    @property
    def time_input_dim( self ):
        '''simple docstring'''
        return 32
    @property
    def time_embed_dim( self ):
        '''simple docstring'''
        return self.time_input_dim * 4
    @property
    def renderer_dim( self ):
        '''simple docstring'''
        return 8
    @property
    def dummy_image_encoder( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        model = CLIPVisionModel(config )
        return model
    @property
    def dummy_image_processor( self ):
        '''simple docstring'''
        image_processor = CLIPImageProcessor(
            crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
        return image_processor
    @property
    def dummy_prior( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'embedding_proj_norm_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        model = PriorTransformer(**model_kwargs )
        return model
    @property
    def dummy_renderer( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs )
        return model
    def get_dummy_components( self ):
        '''simple docstring'''
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            'prior': prior,
            'image_encoder': image_encoder,
            'image_processor': image_processor,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        input_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'image': input_image,
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs
    def lowercase_ ( self ):
        '''simple docstring'''
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def lowercase_ ( self ):
        '''simple docstring'''
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
    def lowercase_ ( self ):
        '''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
        '''simple docstring'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def lowercase_ ( self ):
        '''simple docstring'''
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_img2img_out.npy' )
        pipe = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
| 167
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig( PretrainedConfig ):
    model_type = '''cvt'''
    def __init__( self, num_channels=3, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], embed_dim=[64, 192, 384], num_heads=[1, 3, 6], depth=[1, 2, 10], mlp_ratio=[4.0, 4.0, 4.0], attention_drop_rate=[0.0, 0.0, 0.0], drop_rate=[0.0, 0.0, 0.0], drop_path_rate=[0.0, 0.0, 0.1], qkv_bias=[True, True, True], cls_token=[False, False, True], qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"], kernel_qkv=[3, 3, 3], padding_kv=[1, 1, 1], stride_kv=[2, 2, 2], padding_q=[1, 1, 1], stride_q=[1, 1, 1], initializer_range=0.02, layer_norm_eps=1e-12, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
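# A minimal usage sketch (illustrative; CvtModel is assumed to be the matching
# model class and the defaults above correspond to the three-stage CvT-13 layout):
#
#   config = CvtConfig(embed_dim=[64, 192, 384], depth=[1, 2, 10])
#   # model = CvtModel(config)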
| 320
|
'''simple docstring'''
__all__ = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 320
| 1
|
import numpy as np
def sigmoid (vector : np.ndarray ):
    return 1 / (1 + np.exp(-vector ))
def swish (vector : np.ndarray ):  # x * sigmoid(x), a.k.a. the sigmoid linear unit (SiLU)
    return vector * sigmoid(vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
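# Worked example (values follow directly from the definitions above):
#   sigmoid(np.array([0.0, 2.0])) -> [0.5, 0.8808...]
#   swish(np.array([0.0, 2.0]))   -> [0.0, 1.7616...]   i.e. x * sigmoid(x)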
| 701
|
'''simple docstring'''
def valid_connection (graph : list[list[int]] , next_ver : int , curr_ind : int , path : list[int] ):
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path )
def util_hamilton_cycle (graph : list[list[int]] , path : list[int] , curr_ind : int ):
    # Base Case
    if curr_ind == len(graph ):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0 , len(graph ) ):
        if valid_connection(graph , next_ver , curr_ind , path ):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph , path , curr_ind + 1 ):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle (graph : list[list[int]] , start_index : int = 0 ):
    path = [-1] * (len(graph ) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph , path , 1 ) else []
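# A short usage sketch; the 5-vertex graph below is illustrative. Vertices
# 0-1-2-4-3-0 form a Hamiltonian cycle, so the returned path visits every
# vertex exactly once before closing back on the start vertex.
if __name__ == "__main__":
    example_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(example_graph))  # -> [0, 1, 2, 4, 3, 0]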
| 358
| 0
|
'''simple docstring'''
def selection_sort ( collection : list ) -> list:
    """simple docstring"""
    length = len(collection )
    for i in range(length - 1 ):
        least = i
        for k in range(i + 1 , length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
a__ : Dict = input('Enter numbers separated by a comma:\n').strip()
a__ : Optional[int] = [int(item) for item in user_input.split(',')]
print(selection_sort(unsorted))
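# Worked example: selection_sort([64, 25, 12, 22, 11]) -> [11, 12, 22, 25, 64].
# Each pass i swaps the minimum of collection[i:] into position i, giving
# O(n^2) comparisons but at most n - 1 swaps.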
| 51
|
"""simple docstring"""
import string
from math import log10
def term_frequency ( term , document ):
    """simple docstring"""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation ) ).replace("\n", "" )
    tokenize_document = document_without_punctuation.split(" " ) # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency ( term , corpus ):
    """simple docstring"""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation ) ) # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n" )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def inverse_document_frequency ( df , n , smoothing=False ):
    """simple docstring"""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined." )
        return round(1 + log10(n / (1 + df) ), 3 )
    if df == 0:
        raise ZeroDivisionError("df must be > 0" )
    elif n == 0:
        raise ValueError("log10(0) is undefined." )
    return round(log10(n / df ), 3 )
def tf_idf ( tf , idf ):
    """simple docstring"""
    return round(tf * idf, 3 )
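# A short usage sketch (the corpus and term are illustrative; function names
# follow the conventional tf/df/idf terminology assumed in the fixes above):
# documents are newline-separated, so "cat" appears in 2 of the 3 documents
# below and its idf is round(log10(3 / 2), 3) = 0.176.
if __name__ == "__main__":
    example_corpus = "the cat sat\nthe dog sat\nthe cat ran"
    tf = term_frequency("cat", "the cat sat")               # -> 1
    df, n_docs = document_frequency("cat", example_corpus)  # -> (2, 3)
    idf = inverse_document_frequency(df, n_docs)            # -> 0.176
    print(tf_idf(tf, idf))                                  # -> 0.176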
| 584
| 0
|
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester :
    def __init__(self , parent , batch_size=1_3 , image_size=3_2 , patch_size=2 , num_channels=3 , embed_dim=1_6 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.0_2 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=1_0 , encoder_stride=8 , out_features=["stage1", "stage2", "stage3"] , out_indices=[1, 2, 3] , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs (self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config (self ):
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model (self , config , pixel_values , labels ):
        model = MaskFormerSwinModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone (self , config , pixel_values , labels ):
        model = MaskFormerSwinBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
        # verify ValueError
        with self.parent.assertRaises(ValueError ):
            config.out_features = ["""stem"""]
            model = MaskFormerSwinBackbone(config=config )
    def prepare_config_and_inputs_for_common (self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp (self ):
        self.model_tester = MaskFormerSwinModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerSwinConfig , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def snake_case_ (self ):
pass
def snake_case_ (self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case_ (self ):
return
    def snake_case_ (self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def snake_case_ (self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
@unittest.skip("""Swin does not use inputs_embeds""" )
def snake_case_ (self ):
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def snake_case_ (self ):
pass
    def snake_case_ (self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def snake_case_ (self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def snake_case_ (self ):
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def snake_case_ (self ):
pass
    def check_hidden_states_output (self , inputs_dict , config , model_class , image_size ):
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def snake_case_ (self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def snake_case_ (self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def snake_case_ (self ):
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case_ (self ):
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case_ (self ):
pass
    def snake_case_ (self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t ):
            t[t != t] = 0
            return t
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()
            def recursive_check(tuple_object , dict_object ):
                if isinstance(tuple_object , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif isinstance(tuple_object , Dict ):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values() , dict_object.values() ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1e-5 ) , msg=(
                            """Tuple and dict output are not equal. Difference:"""
                            F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
                            F" {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object )}. Dict has"
                            F" `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object )}."
                        ) , )
            recursive_check(tuple_output , dict_output )
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {"""output_hidden_states""": True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {"""output_hidden_states""": True} )
@require_torch
class __lowerCAmelCase ( unittest.TestCase , BackboneTesterMixin ):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp (self ):
        self.model_tester = MaskFormerSwinModelTester(self )
    def snake_case_ (self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["""pixel_values"""].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config )
            backbone.to(torch_device )
            backbone.eval()
            outputs = backbone(**inputs_dict )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , tuple )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict , output_attentions=True )
                self.assertIsNotNone(outputs.attentions )
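# These tests are usually collected by pytest; an illustrative invocation
# (the file path is an assumption that depends on the repository layout):
#   python -m pytest tests/models/maskformer/test_modeling_maskformer_swin.py -q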
| 707
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase_ : Dict = logging.getLogger(__name__)
def simple_accuracy( preds , labels ):
    return (preds == labels).mean()
@dataclass
class ModelArguments :
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class DataTrainingArguments :
    task_name: str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
    data_dir: str = field(metadata={"""help""": """Should contain the data files for the task."""} )
    max_seq_length: int = field(
        default=1_2_8 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("""Training/evaluation parameters %s""" , training_args )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , """eval_results.txt""" )
        if trainer.is_world_master():
            with open(output_eval_file , """w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key, value in result.items():
                    logger.info(""" %s = %s""" , key , value )
                    writer.write("""%s = %s\n""" % (key, value) )
            results.update(result )
    return results
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
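# Example invocation (paths and hyper-parameters are illustrative only):
#
#   python run_multiple_choice.py \
#       --task_name swag --model_name_or_path roberta-base \
#       --data_dir ./swag_data --output_dir ./swag_out \
#       --do_train --do_eval --max_seq_length 80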
| 156
| 0
|
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowerCamelCase_ = False, False, False
@dataclass
class UpperCamelCase_ :
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
    _type: str = field(default='''Audio''' , init=False , repr=False )
def __call__( self : Dict ) -> int:
return self.pa_type
    def encode_example( self , value: Union[str, bytes, dict] ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install \'soundfile\'." ) from err
        if isinstance(value , str ):
            return {"bytes": None, "path": value}
        elif isinstance(value , bytes ):
            return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer , value["array"] , value["sampling_rate"] , format="wav" )
            return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a \'sampling_rate\' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"] , dtype=np.int16 ).astype(np.float32 ) / 32_767
                else:
                    bytes_value = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.float32 ) / 32_767
                buffer = BytesIO(bytes() )
                sf.write(buffer , bytes_value , value["sampling_rate"] , format="wav" )
                return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f"""An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.""" )
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ')
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ')
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                token = None
            with xopen(path, "rb", use_auth_token=token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
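

# --- Usage sketch (not part of the original file): a minimal, hedged example of
# exercising the Audio feature above. The file path is an illustrative
# placeholder; the in-memory branch assumes numpy and soundfile are installed.
if __name__ == "__main__":
    audio_feature = Audio(sampling_rate=16000)
    # A plain path is stored lazily, without reading the file.
    encoded = audio_feature.encode_example("some/local/file.wav")
    assert encoded == {"bytes": None, "path": "some/local/file.wav"}
    # An in-memory array is serialized to WAV bytes instead.
    wave = np.zeros(16000, dtype=np.float32)
    encoded = audio_feature.encode_example({"array": wave, "sampling_rate": 16000})
    assert encoded["bytes"] is not None and encoded["path"] is None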
| 95
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065, hidden_size=1536, num_hidden_layers=36, intermediate_size=6144,
        num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920,
        layer_norm_eps=1e-5, layerdrop=0.3, hidden_act="relu", initializer_range=0.02,
        hidden_dropout_prob=0.3, attention_probs_dropout_prob=0.3,
        pad_token_id=1, bos_token_id=0, eos_token_id=2,
        conv_glu_dim=1, conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,),
        input_feat_per_channel=80, input_channels=1, conv_channels=None,
        ctc_loss_reduction="sum", ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`.")
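

# --- Usage sketch (not part of the original file): instantiating the config and
# triggering the conv-layer validation above. Values are illustrative.
if __name__ == "__main__":
    config = MCTCTConfig()  # library defaults
    assert len(config.conv_kernel) == config.num_conv_layers == 1
    try:
        MCTCTConfig(conv_kernel=(7, 7), num_conv_layers=1)  # mismatched on purpose
    except ValueError as err:
        print(err)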
| 522
| 0
|
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
    """Checks that the `complete_*` example scripts stay in sync with the `by_feature` scripts."""

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, parser_only
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")
    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
            " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
            " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
            " " * 20 + "\"epoch\": epoch,\n\n",
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clean_on_exit = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
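

# --- Usage sketch (not part of the original tests): composing the same
# `accelerate launch` command the fixture builds; the config path is an
# illustrative placeholder.
if __name__ == "__main__":
    launch_args = ["accelerate", "launch", "--config_file", "/tmp/default_config.yml"]
    testargs = ["examples/by_feature/local_sgd.py"]
    print(" ".join(launch_args + testargs))
    # accelerate launch --config_file /tmp/default_config.yml examples/by_feature/local_sgd.py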
| 713
|
'''simple docstring'''
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}")


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
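

# --- Usage sketch (not part of the original file): how this subcommand is
# reached from the shell, with illustrative file names:
#
#   transformers-cli run --task sentiment-analysis --input reviews.csv \
#       --column text --format infer --output predictions.json
#
# The format-inference helper can also be called directly:
if __name__ == "__main__":
    print(try_infer_format_from_ext("reviews.csv"))  # -> "csv"
    print(try_infer_format_from_ext(""))             # -> "pipe"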
| 543
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400, n_ctx=2048, n_positions=2048, n_embd=4096, n_layer=28, n_head=16,
        rotary_dim=64, n_inner=None, activation_function="gelu_new",
        resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0,
        layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True,
        bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
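

# --- Usage sketch (not part of the original file): a small config plus its ONNX
# export config; the tiny sizes are illustrative.
if __name__ == "__main__":
    config = CodeGenConfig(n_layer=2, n_head=4, n_embd=64)
    onnx_config = CodeGenOnnxConfig(config)
    print(onnx_config.inputs)              # axes for input_ids / attention_mask
    print(onnx_config.num_layers)          # -> 2
    print(onnx_config.default_onnx_opset)  # -> 13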
| 495
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
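

# --- Usage sketch (not part of the original file): preprocessing one dummy
# image; assumes numpy and that the restored class matches the upstream
# CLIPImageProcessor behavior.
if __name__ == "__main__":
    image_processor = CLIPImageProcessor()
    dummy = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
    batch = image_processor.preprocess(dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected (1, 3, 224, 224)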
| 495
| 1
|
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_CITATION = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 290
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."
def __lowerCAmelCase ( __snake_case="no" , __snake_case = default_json_config_file , __snake_case = False ):
__lowerCAmelCase = Path(__snake_case )
path.parent.mkdir(parents=__snake_case , exist_ok=__snake_case )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
__lowerCAmelCase = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
__lowerCAmelCase = {
"compute_environment": "LOCAL_MACHINE",
"mixed_precision": mixed_precision,
}
if torch.cuda.is_available():
__lowerCAmelCase = torch.cuda.device_count()
__lowerCAmelCase = num_gpus
__lowerCAmelCase = False
if num_gpus > 1:
__lowerCAmelCase = "MULTI_GPU"
else:
__lowerCAmelCase = "NO"
elif is_xpu_available() and use_xpu:
__lowerCAmelCase = torch.xpu.device_count()
__lowerCAmelCase = num_xpus
__lowerCAmelCase = False
if num_xpus > 1:
__lowerCAmelCase = "MULTI_XPU"
else:
__lowerCAmelCase = "NO"
elif is_npu_available():
__lowerCAmelCase = torch.npu.device_count()
__lowerCAmelCase = num_npus
__lowerCAmelCase = False
if num_npus > 1:
__lowerCAmelCase = "MULTI_NPU"
else:
__lowerCAmelCase = "NO"
else:
__lowerCAmelCase = 0
__lowerCAmelCase = True
__lowerCAmelCase = 1
__lowerCAmelCase = "NO"
__lowerCAmelCase = ClusterConfig(**__snake_case )
config.to_json_file(__snake_case )
return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 290
| 1
|