| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
snake_case : Union[str, Any] = mock.Mock()
snake_case : Any = 500
snake_case : Optional[Any] = {}
snake_case : int = HTTPError
snake_case : Optional[int] = {}
# Download this model to make sure it's in the cache.
snake_case : List[str] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=snake_case__ ) as mock_head:
snake_case : Any = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
snake_case : Tuple = mock.Mock()
snake_case : int = 500
snake_case : Dict = {}
snake_case : List[Any] = HTTPError
snake_case : Any = {}
# Download this model to make sure it's in the cache.
snake_case : Optional[Any] = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=snake_case__ ) as mock_head:
snake_case : Any = GPTaTokenizerFast.from_pretrained("gpt2" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
try:
snake_case : Optional[int] = tempfile.mktemp()
with open(snake_case__ , "wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , snake_case__ )
snake_case : Union[str, Any] = AlbertTokenizer.from_pretrained(snake_case__ )
finally:
os.remove(snake_case__ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" , "wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , snake_case__ )
snake_case : int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
snake_case : Tuple = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the tokenizer comes from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the tokenizer comes from the CustomTokenizerFast class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")

        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the tokenizer comes from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct strings.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
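
# --- Illustration (added; not part of the original test file) ---
# A minimal sketch of the longest-match splitting behavior the TrieTest cases
# above exercise, using only the standard library. `MiniTrie` is a hypothetical
# stand-in for transformers' internal Trie, written under the assumption that
# `add` builds a nested-dict trie and `split` greedily takes the longest
# registered token at each position.
class MiniTrie:
    def __init__(self):
        self.data = {}

    def add(self, word):
        node = self.data
        for ch in word:
            node = node.setdefault(ch, {})
        node[""] = 1  # the empty-string key marks the end of a stored token

    def split(self, text):
        out, i, start = [], 0, 0
        while i < len(text):
            node, j, end = self.data, i, None
            # Walk as far as the trie allows, remembering the last complete token.
            while j < len(text) and text[j] in node:
                node = node[text[j]]
                j += 1
                if "" in node:
                    end = j
            if end is None:
                i += 1  # no token starts here; keep scanning
            else:
                if start < i:
                    out.append(text[start:i])  # flush the unmatched stretch
                out.append(text[i:end])
                i = start = end
        if start < len(text):
            out.append(text[start:])
        return out


def _demo_mini_trie():
    trie = MiniTrie()
    trie.add("AB")
    trie.add("B")
    trie.add("C")
    assert trie.split("ABC") == ["AB", "C"]  # same expectation as test_trie_suffix_tokens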
"""simple docstring"""
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ = "" , snake_case__ = False ):
"""simple docstring"""
lowerCAmelCase : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
lowerCAmelCase : str = is_leaf
lowerCAmelCase : str = prefix
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = 0
for q, w in zip(self.prefix , snake_case__ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
for word in words:
self.insert(snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if self.prefix == word:
lowerCAmelCase : Union[str, Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowerCAmelCase : Optional[Any] = RadixNode(prefix=snake_case__ , is_leaf=snake_case__ )
else:
lowerCAmelCase : Tuple = self.nodes[word[0]]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = incoming_node.match(
snake_case__ )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(snake_case__ )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowerCAmelCase : Optional[Any] = remaining_prefix
lowerCAmelCase : int = self.nodes[matching_string[0]]
lowerCAmelCase : List[Any] = RadixNode(snake_case__ , snake_case__ )
lowerCAmelCase : Optional[int] = aux_node
if remaining_word == "":
lowerCAmelCase : Optional[int] = True
else:
self.nodes[matching_string[0]].insert(snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : str = self.nodes.get(word[0] , snake_case__ )
if not incoming_node:
return False
else:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = incoming_node.match(
snake_case__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : int = self.nodes.get(word[0] , snake_case__ )
if not incoming_node:
return False
else:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = incoming_node.match(
snake_case__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(snake_case__ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowerCAmelCase : List[str] = list(self.nodes.values() )[0]
lowerCAmelCase : List[str] = merging_node.is_leaf
self.prefix += merging_node.prefix
lowerCAmelCase : Optional[int] = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowerCAmelCase : Optional[int] = False
# If there is 1 edge, we merge it with its child
else:
lowerCAmelCase : Optional[Any] = list(incoming_node.nodes.values() )[0]
lowerCAmelCase : int = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowerCAmelCase : Tuple = merging_node.nodes
return True
def lowercase__ ( self , snake_case__ = 0 ):
"""simple docstring"""
if self.prefix != "":
print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = "banana bananas bandana band apple all beast".split()
lowerCAmelCase : List[str] = RadixNode()
root.insert_many(SCREAMING_SNAKE_CASE )
assert all(root.find(SCREAMING_SNAKE_CASE ) for word in words )
assert not root.find("bandanas" )
assert not root.find("apps" )
root.delete("all" )
assert not root.find("all" )
root.delete("banana" )
assert not root.find("banana" )
assert root.find("bananas" )
return True
def a__ ( ):
'''simple docstring'''
assert test_trie()
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Dict = RadixNode()
lowerCAmelCase : Optional[Any] = "banana bananas bandanas bandana band apple all beast".split()
root.insert_many(SCREAMING_SNAKE_CASE )
print("Words:" , SCREAMING_SNAKE_CASE )
print("Tree:" )
root.print_tree()
if __name__ == "__main__":
main()
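
def _demo_radix_usage():
    # --- Illustration (added; not part of the original file) ---
    # Insert, query, and delete with the RadixNode above; deleting a word
    # leaves siblings that share its prefix intact.
    root = RadixNode()
    root.insert_many(["romane", "romanus", "romulus", "rubens"])
    assert root.find("romanus")
    assert not root.find("roman")  # internal prefix, not a stored word
    root.delete("romane")
    assert not root.find("romane")
    assert root.find("romanus")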
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
__snake_case = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
__snake_case = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
@lru_cache()
def __lowerCAmelCase ( ) -> Dict:
"""simple docstring"""
snake_case : Optional[int] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
snake_case : Optional[Any] = bs[:]
snake_case : str = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowercase )
cs.append(2**8 + n )
n += 1
snake_case : Any = [chr(lowercase ) for n in cs]
return dict(zip(lowercase , lowercase ) )
def __lowerCAmelCase ( lowercase : Union[str, Any] ) -> Tuple:
"""simple docstring"""
snake_case : int = set()
snake_case : Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case : Dict = char
return pairs
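
def _demo_byte_bpe_helpers():
    # --- Illustration (added; not part of the original module) ---
    # What the two helpers above produce, as plain assertions over the
    # functions defined in this module.
    byte_encoder = bytes_to_unicode()
    assert byte_encoder[ord("A")] == "A"  # printable bytes map to themselves
    assert byte_encoder[ord(" ")] == "Ġ"  # space (byte 32) becomes the familiar "Ġ" marker
    assert len(byte_encoder) == 256       # every byte value gets a unique unicode character
    assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}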
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
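
def _demo_bart_tokenizer():
    # --- Illustration (added; not part of the original module) ---
    # Typical use of the tokenizer above through the public transformers API.
    # Assumes network access and the facebook/bart-base checkpoint, so it is
    # not executed on import.
    from transformers import BartTokenizer

    tok = BartTokenizer.from_pretrained("facebook/bart-base")
    enc = tok("Hello world")
    print(enc["input_ids"])  # ids wrapped as <s> ... </s>
    print(tok.convert_ids_to_tokens(enc["input_ids"]))  # byte-level pieces, e.g. "Ġworld" ("Ġ" encodes the space)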
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
__snake_case = logging.getLogger(__name__)
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30522, type=int)
__snake_case = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, """rb""") as fp:
__snake_case = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
__snake_case = Counter()
for tk_ids in data:
counter.update(tk_ids)
__snake_case = [0] * args.vocab_size
for k, v in counter.items():
__snake_case = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
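
def _demo_token_counts():
    # --- Illustration (added; not part of the original script) ---
    # The counting step above on a toy "dataset": two sequences of token ids
    # over a vocabulary of size 6. Values here are made up for the example.
    from collections import Counter

    data = [[0, 2, 2, 5], [2, 3]]
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * 6
    for k, v in counter.items():
        counts[k] = v
    assert counts == [1, 0, 3, 1, 0, 1]  # token 2 appears three times overall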
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
        OpenAIGPTConfig,
        OpenAIGPTDoubleHeadsModel,
        OpenAIGPTForSequenceClassification,
        OpenAIGPTLMHeadModel,
        OpenAIGPTModel,
    )


class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
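
def _demo_greedy_generation():
    # --- Illustration (added; not part of the original test file) ---
    # The same greedy decode the slow test above checks, via the public API.
    # Assumes network access and the "openai-gpt" checkpoint, so it is not
    # run as part of the test suite.
    from transformers import AutoTokenizer, OpenAIGPTLMHeadModel

    tokenizer = AutoTokenizer.from_pretrained("openai-gpt")
    model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
    input_ids = tokenizer("the president is", return_tensors="pt").input_ids
    output_ids = model.generate(input_ids, do_sample=False, max_length=20)
    print(tokenizer.decode(output_ids[0]))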
from __future__ import annotations

from collections import deque


class Automaton:
    """Aho-Corasick automaton: matches a set of keywords in a single pass over a text."""

    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        # State 0 is the root; each state stores its character, children, fail link, and outputs.
        self.adlist.append({"value": "", "next_states": [], "fail_state": 0, "output": []})

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of their occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
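
def _demo_automaton():
    # --- Illustration (added; not part of the original file) ---
    # One pass of Aho-Corasick over a text finds all keyword occurrences,
    # including overlapping ones ("hat" inside "what").
    automaton = Automaton(["what", "hat", "ver", "er"])
    matches = automaton.search_in("whatever")
    assert matches == {"what": [0], "hat": [1], "ver": [5], "er": [6]}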
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
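
# --- Illustration (added; not part of the original __init__) ---
# A minimal sketch of what the _LazyModule pattern above buys: the heavy
# submodule is only imported when one of its attributes is first accessed.
# `LazyDemo` is a hypothetical, simplified stand-in, not transformers' actual
# implementation.
import importlib
import types


class LazyDemo(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache, so __getattr__ isn't hit again for this name
        return value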
from __future__ import annotations

import unittest

from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TFMobileBertForMaskedLM,
        TFMobileBertForMultipleChoice,
        TFMobileBertForNextSentencePrediction,
        TFMobileBertForPreTraining,
        TFMobileBertForQuestionAnswering,
        TFMobileBertForSequenceClassification,
        TFMobileBertForTokenClassification,
        TFMobileBertModel,
    )


@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    # Special case for the ForPreTraining model: it additionally needs a next-sentence label.
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict

    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
"""Convert DPT (hybrid) checkpoints from the original repository: https://github.com/isl-org/DPT"""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
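
def _demo_config_dispatch():
    # --- Illustration (added; not part of the original script) ---
    # get_dpt_config keys entirely off substrings of the checkpoint URL. A toy
    # reproduction of that dispatch, with made-up URLs:
    def kind(checkpoint_url):
        if "large" in checkpoint_url:
            return "large"
        if "nyu" in checkpoint_url or "midas" in checkpoint_url:
            return "depth"
        if "ade" in checkpoint_url:
            return "segmentation"
        return "unknown"

    assert kind("https://example.com/dpt_hybrid-midas.pt") == "depth"
    assert kind("https://example.com/dpt_hybrid-ade20k.pt") == "segmentation"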
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> int:
'''simple docstring'''
lowerCAmelCase : List[str] = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase, _UpperCAmelCase )
def rename_key(name):
    '''simple docstring'''
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model', 'dpt.encoder')
    if "pretrained.model" in name:
        name = name.replace('pretrained.model', 'dpt.embeddings')
    if "patch_embed" in name:
        name = name.replace('patch_embed', '')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'position_embeddings')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "proj" in name and "project" not in name:
        name = name.replace('proj', 'projection')
    if "blocks" in name:
        name = name.replace('blocks', 'layer')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "norm1" in name and "backbone" not in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name and "backbone" not in name:
        name = name.replace('norm2', 'layernorm_after')
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv', 'head')
    if "scratch" in name:
        name = name.replace('scratch', 'neck')
    if "layer1_rn" in name:
        name = name.replace('layer1_rn', 'convs.0')
    if "layer2_rn" in name:
        name = name.replace('layer2_rn', 'convs.1')
    if "layer3_rn" in name:
        name = name.replace('layer3_rn', 'convs.2')
    if "layer4_rn" in name:
        name = name.replace('layer4_rn', 'convs.3')
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace('out_conv', 'projection')
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1', 'residual_layer1')
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2', 'residual_layer2')
    if "conv1" in name:
        name = name.replace('conv1', 'convolution1')
    if "conv2" in name:
        name = name.replace('conv2', 'convolution2')
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0', 'neck.reassemble_stage.readout_projects.0.0')
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0', 'neck.reassemble_stage.readout_projects.1.0')
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0', 'neck.reassemble_stage.readout_projects.2.0')
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0', 'neck.reassemble_stage.readout_projects.3.0')
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3', 'neck.reassemble_stage.layers.0.projection')
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4', 'neck.reassemble_stage.layers.0.resize')
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3', 'neck.reassemble_stage.layers.1.projection')
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4', 'neck.reassemble_stage.layers.1.resize')
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3', 'neck.reassemble_stage.layers.2.projection')
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3', 'neck.reassemble_stage.layers.3.projection')
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4', 'neck.reassemble_stage.layers.3.resize')
    if "pretrained" in name:
        name = name.replace('pretrained', 'dpt')
    if "bn" in name:
        name = name.replace('bn', 'batch_norm')
    if "head" in name:
        name = name.replace('head', 'head.head')
    if "encoder.norm" in name:
        name = name.replace('encoder.norm', 'layernorm')
    if "auxlayer" in name:
        name = name.replace('auxlayer', 'auxiliary_head.head')
    if "backbone" in name:
        name = name.replace('backbone', 'backbone.bit.encoder')
    if ".." in name:
        name = name.replace('..', '.')
    if "stem.conv" in name:
        name = name.replace('stem.conv', 'bit.embedder.convolution')
    if "blocks" in name:
        name = name.replace('blocks', 'layers')
    if "convolution" in name and "backbone" in name:
        name = name.replace('convolution', 'conv')
    if "layer" in name and "backbone" in name:
        name = name.replace('layer', 'layers')
    if "backbone.bit.encoder.bit" in name:
        name = name.replace('backbone.bit.encoder.bit', 'backbone.bit')
    if "embedder.conv" in name:
        name = name.replace('embedder.conv', 'embedder.convolution')
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace('backbone.bit.encoder.stem.norm', 'backbone.bit.embedder.norm')
    return name
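# Worked examples of the refinenet index flip above (illustrative only):
#   "neck.refinenet4.out_conv"           -> abs(4 - 4) = 0 -> "neck.fusion_stage.layers.0.projection"
#   "neck.refinenet1.resConfUnit1.conv1" -> abs(1 - 4) = 3 -> "neck.fusion_stage.layers.3.residual_layer1.convolution1"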
def read_in_q_k_v(state_dict, config):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
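# Shape note: timm stores the fused qkv projection as one (3*hidden_size, hidden_size)
# weight and a (3*hidden_size,) bias, so the slices [:H], [H:2*H] and [-H:] above recover
# the query, key and value parameters, in that order.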
def prepare_img():
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    '''simple docstring'''
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location='cpu')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors='pt')
    # forward pass
    outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode='bicubic', align_corners=False, )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub('ybelkada/dpt-hybrid-midas')
        image_processor.push_to_hub('ybelkada/dpt-hybrid-midas')
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
parser.add_argument(
'''--show_prediction''',
action='''store_true''',
)
__A : Dict = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
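# Usage sketch (the script name and local path below are assumptions, not from this file):
#   python convert_dpt_to_pytorch.py --checkpoint_url ./dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt --show_prediction
# Note that torch.load() above reads a local file, so --checkpoint_url must point at an
# already-downloaded checkpoint; the torch.hub download line is left commented out.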
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar('T')
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]):
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()
    def build(self):
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])
    def update(self, p: int, v: T):
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])
    def query(self, l: int, r: int):  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
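# Layout note: self.st is an implicit binary heap in which node p has children 2*p and
# 2*p + 1, and the N leaves occupy indices N .. 2*N - 1. An update therefore recomputes
# only the ~log2(N) ancestors of one leaf, while query() climbs both interval ends
# toward the root, folding in a node whenever it is the left/right edge of its parent.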
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        '''simple docstring'''
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fpaa=True, scale_attention_softmax_in_fpaa=True, multi_query=True, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fpaa = attention_softmax_in_fpaa
        self.scale_attention_softmax_in_fpaa = scale_attention_softmax_in_fpaa
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
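# Minimal usage sketch (assumed, not part of the original file):
#   config = GPTBigCodeConfig(n_embd=2048, n_layer=24, n_head=16)
#   config.hidden_size  # -> 2048, resolved via the attribute_map alias above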
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    '''simple docstring'''
    def setUp(self) -> None:
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        '''simple docstring'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self) -> None:
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        '''simple docstring'''
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        '''simple docstring'''
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        '''simple docstring'''
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_visual_prompt(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
"""simple docstring"""
if num < 0:
raise ValueError('Number should not be negative.' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
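# e.g. factorial(5) == 120; with @lru_cache every intermediate value computed by the
# recursion is memoised, so repeated calls are answered without recursing again.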
if __name__ == "__main__":
import doctest
doctest.testmod()
class RadixNode:
    """simple docstring"""
    def __init__(self, prefix: str = "", is_leaf: bool = False):
        """simple docstring"""
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match(self, word: str):
        """simple docstring"""
        x = 0
        for q, w in zip(self.prefix, word):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
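    # Example: with self.prefix == "band" and word == "banana", match returns
    # ("ban", "d", "ana") - the matching part, the rest of the prefix, and the rest of the word.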
    def insert_many(self, words: list[str]):
        """simple docstring"""
        for word in words:
            self.insert(word)
    def insert(self, word: str):
        """simple docstring"""
        # Case 1: The word equals the node prefix, so we mark the node as a leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        """simple docstring"""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        """simple docstring"""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree(self, height: int = 0):
"""simple docstring"""
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def test_trie() -> bool:
    """simple docstring"""
    words = 'banana bananas bandana band apple all beast'.split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
assert not root.find('bandanas' )
assert not root.find('apps' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def pytests() -> None:
    """simple docstring"""
    assert test_trie()
def main() -> None:
    """simple docstring"""
    root = RadixNode()
    words = 'banana bananas bandanas bandana band apple all beast'.split()
    root.insert_many(words)
    print('Words:', words)
print('Tree:' )
root.print_tree()
if __name__ == "__main__":
main()
import pytest
DATASET_LOADING_SCRIPT_NAME = """__dummy_dataset1__"""
DATASET_LOADING_SCRIPT_CODE = """
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name() -> str:
    """simple docstring"""
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code() -> str:
    """simple docstring"""
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    """simple docstring"""
    script_name = dataset_loading_script_name
    script_dir = tmp_path / 'datasets' / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, 'w') as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
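# Hypothetical test using the fixture above (the test name and call are illustrative only):
#   def test_dummy_dataset(dataset_loading_script_dir):
#       ds = datasets.load_dataset(dataset_loading_script_dir, split="train")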
def infix_2_postfix(infix):
    """simple docstring"""
    stack = []
    post_fix = []
    priority = {
        '^': 3,
        '*': 2,
        '/': 2,
        '%': 2,
        '+': 1,
        '-': 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7
    # Print table header for output
    print(
        'Symbol'.center(8), 'Stack'.center(print_width), 'Postfix'.center(print_width), sep=' | ', )
    print('-' * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), (''.join(stack)).ljust(print_width), (''.join(post_fix)).ljust(print_width), sep=' | ', )  # Output in tabular format
    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            ' '.center(8), (''.join(stack)).ljust(print_width), (''.join(post_fix)).ljust(print_width), sep=' | ', )  # Output in tabular format
    return ''.join(post_fix)  # return Postfix as str
def infix_2_prefix(infix):
    """simple docstring"""
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ')'  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = '('  # change ")" to "("
    return (infix_2_postfix(''.join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
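# Worked example: infix_2_prefix("a+b*c") reverses the input to "c*b+a", converts that
# to postfix "cb*a+", and reverses once more to produce the prefix form "+a*bc".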
if __name__ == "__main__":
    Infix = input("""\nEnter an Infix Equation = """)  # Input an Infix equation
    Infix = """""".join(Infix.split())  # Remove spaces from the input
print("""\n\t""", Infix, """(Infix) -> """, infix_2_prefix(Infix), """(Prefix)""")
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'yolos'
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('1.11')
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    @property
    def default_onnx_opset(self) -> int:
        return 12
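# Usage sketch (the output directory is illustrative, not from this file):
#   python -m transformers.onnx --model=hustvl/yolos-small onnx/
# The exporter consumes `inputs`, `atol_for_validation` and `default_onnx_opset` above.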
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
MODEL = '''base_with_context'''
def load_notes_encoder(weights, model):
__SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) )
    __SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(
        torch.FloatTensor(weights["""Embed_0"""]["""embedding"""]), requires_grad=False)
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
__SCREAMING_SNAKE_CASE : str = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
        attention_weights = ly_weight["""attention"""]
__SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
__SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_continuous_encoder(weights, model):
__SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) )
    __SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(
        torch.FloatTensor(weights["""Embed_0"""]["""embedding"""]), requires_grad=False)
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["""attention"""]
__SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
__SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
__SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_decoder(weights, model):
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) )
    __SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(
        torch.FloatTensor(weights["""Embed_0"""]["""embedding"""]), requires_grad=False)
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(
torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
__SCREAMING_SNAKE_CASE : int = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) )
__SCREAMING_SNAKE_CASE : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
        attention_weights = ly_weight["""self_attention"""]
__SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
        attention_weights = ly_weight["""MultiHeadDotProductAttention_0"""]
__SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : int = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) )
__SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
__SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
__SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) )
__SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) )
return model
def main(args):
    ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path)
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)
    gin_overrides = [
        """from __gin__ import dynamic_registration""",
        """from music_spectrogram_diffusion.models.diffusion import diffusion_utils""",
        """diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""",
        """diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""",
    ]
    gin_file = os.path.join(args.checkpoint_path, """..""", """config.gin""")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""", variance_type="""fixed_large""")
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["""inputs"""], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="""gated-gelu""", )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["""targets_context"""], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="""gated-gelu""", )
    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["""targets_context"""], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )
    notes_encoder = load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""], continuous_encoder)
    decoder = load_decoder(ta_checkpoint["""target"""]["""decoder"""], decoder)
    melgan = OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""")
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
UpperCamelCase__ : Tuple = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=f"{MODEL}/checkpoint_500000",
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
UpperCamelCase__ : List[str] = parser.parse_args()
main(args)
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"
    def __init__(self, vocab_size=8065, hidden_size=1536, num_hidden_layers=36, intermediate_size=6144, num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920, layer_norm_eps=1e-5, layerdrop=0.3, hidden_act="relu", initializer_range=0.02, hidden_dropout_prob=0.3, attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2, conv_glu_dim=1, conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,), input_feat_per_channel=80, input_channels=1, conv_channels=None, ctc_loss_reduction="sum", ctc_zero_infinity=False, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
f"but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, "
f"`config.num_conv_layers = {self.num_conv_layers}`." )
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class __snake_case(TestCase):
    def setUp(self) -> None:
        '''simple docstring'''
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = """pt"""
        self.framework_tf = """tf"""
    def _setup_pt_ckpt(self, model_path):
        '''simple docstring'''
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(model_path)
    def _setup_tf_ckpt(self, model_path):
        '''simple docstring'''
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(model_path)
    def test_framework_provided(self):
        '''simple docstring'''
        mock_framework = """mock_framework"""
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
    def test_framework_from_local_checkpoint(self):
        '''simple docstring'''
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)
    def test_framework_from_environment(self):
        '''simple docstring'''
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("""transformers.onnx.features.is_tf_available""", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("""transformers.onnx.features.is_torch_available""", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("""transformers.onnx.features.is_tf_available""", mock_tf_available), patch(
            """transformers.onnx.features.is_torch_available""", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("""transformers.onnx.features.is_tf_available""", mock_tf_available), patch(
            """transformers.onnx.features.is_torch_available""", mock_torch_available):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
import random
class Onepad:
    """simple docstring"""
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        '''simple docstring'''
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key
    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        '''simple docstring'''
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt("""Hello""")
print(c, k)
print(Onepad().decrypt(c, k))
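# Why decrypt() inverts encrypt(): encrypt stores c = (i + k) * k = i*k + k**2, so
# (c - k**2) / k == i exactly, and chr(i) restores the original character.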
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute(self, predictions: List[List[str]], references: List[List[List[str]]], min_len: int = 1, max_len: int = 4):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """The denoised sample, shaped (batch_size, num_channels, sample_size)."""

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, sample_size: int = 65536, sample_rate: Optional[int] = None, in_channels: int = 2, out_channels: int = 2, extra_in_channels: int = 0, time_embedding_type: str = "fourier", flip_sin_to_cos: bool = True, use_timestep_embedding: bool = False, freq_shift: float = 0.0, down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), mid_block_type: str = "UNetMidBlock1D", out_block_type: str = None, block_out_channels: Tuple[int] = (32, 32, 64), act_fn: str = None, norm_num_groups: int = 8, layers_per_block: int = 1, downsample_each_block: bool = False, ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0], )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block, )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block, )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block, )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4, )

    def forward(self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], return_dict: bool = True, ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
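# A minimal smoke test for the model above (a hypothetical usage sketch, not part of
# the library: `extra_in_channels=16` assumes the default Fourier time features are
# concatenated to the input by the first block, and the sample length must be
# divisible by the two downsampling stages of the default block layout).
if __name__ == "__main__":
    model = UNet1DModel(extra_in_channels=16)
    noisy_sample = torch.randn(1, 2, 256)  # (batch, in_channels, length)
    denoised = model(noisy_sample, timestep=10).sample
    print(denoised.shape)  # expected: torch.Size([1, 2, 256])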
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: find the longest palindromic substring in O(n) time.

    >>> palindromic_string('abbbaba')
    'abbba'
    >>> palindromic_string('ababa')
    'ababa'
    """
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length - 1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append the last character
    new_input_string += input_string[-1]
    # we will store the start and end of the previous furthest-ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of the palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_string find the corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
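# Hypothetical brute-force cross-check (not part of the original module): the O(n^3)
# scan below can be used to validate the O(n) result above on short inputs.
def longest_palindrome_naive(s: str) -> str:
    best = ""
    for i in range(len(s)):
        for j in range(i, len(s)):
            candidate = s[i : j + 1]
            if candidate == candidate[::-1] and len(candidate) > len(best):
                best = candidate
    return best


assert palindromic_string("abbbaba") == longest_palindrome_naive("abbbaba") == "abbba"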
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception('''Matrices are not 2x2''')
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception('''Odd matrices are not supported!''')

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print('''\n'''.join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursive Strassen multiplication for square matrices whose size is a power of 2."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            '''Unable to multiply these matrices, please check the dimensions.\n'''
            f"""Matrix A: {matrix1}\n"""
            f"""Matrix B: {matrix2}"""
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    maxim = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maxim))))
    # Pad copies of the matrices with zeros so that both are maxim x maxim and the
    # dimensions are a power of 2 (the inputs themselves are left untouched).
    new_matrix1 = [row[:] for row in matrix1]
    new_matrix2 = [row[:] for row in matrix2]
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
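# Hypothetical sanity check (not part of the original module): Strassen's output can
# be validated against a straightforward O(n^3) triple-loop product, e.g.
# `strassen(m1, m2) == naive_multiply(m1, m2)` for any compatible m1, m2.
def naive_multiply(a: list, b: list) -> list:
    return [
        [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
        for i in range(len(a))
    ]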
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
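# `_LazyModule` defers the heavy submodule imports above until an attribute is first
# accessed. A stripped-down sketch of the idea (hypothetical; the real implementation
# lives in `transformers.utils`):
#
#     import importlib
#     import types
#
#     class LazyModuleSketch(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._class_to_module = {
#                 cls: mod for mod, classes in import_structure.items() for cls in classes
#             }
#
#         def __getattr__(self, item):
#             submodule = importlib.import_module("." + self._class_to_module[item], self.__name__)
#             return getattr(submodule, item)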
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self) -> None:
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
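# Usage note: when `learnable=False`, `_encode_prompt` below falls back to encoding the
# empty prompt "" for the unconditional branch of classifier-free guidance; when
# `learnable=True`, the `length x hidden_size` table stored above is used (and trained)
# instead.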
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
'''simple docstring'''
_UpperCAmelCase : int =len(snake_case) if isinstance(snake_case , snake_case) else 1
# get prompt text embeddings
_UpperCAmelCase : Optional[Any] =self.tokenizer(
snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
_UpperCAmelCase : Union[str, Any] =text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_UpperCAmelCase : str =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f" {self.tokenizer.model_max_length} tokens: {removed_text}")
_UpperCAmelCase : Union[str, Any] =text_input_ids[:, : self.tokenizer.model_max_length]
_UpperCAmelCase : Optional[int] =self.text_encoder(text_input_ids.to(self.device))[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
_UpperCAmelCase : List[str] =prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=snake_case)
# duplicate text embeddings for each generation per prompt
_UpperCAmelCase : Optional[Any] =prompt_embeds.repeat_interleave(snake_case , dim=0)
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
_UpperCAmelCase : Dict =self.learned_classifier_free_sampling_embeddings.embeddings
_UpperCAmelCase : Any =negative_prompt_embeds.unsqueeze(0).repeat(snake_case , 1 , 1)
else:
_UpperCAmelCase : str =[''] * batch_size
_UpperCAmelCase : Dict =text_input_ids.shape[-1]
_UpperCAmelCase : str =self.tokenizer(
snake_case , padding='max_length' , max_length=snake_case , truncation=snake_case , return_tensors='pt' , )
_UpperCAmelCase : str =self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# See comment for normalizing text embeddings
_UpperCAmelCase : Tuple =negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=snake_case)
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_UpperCAmelCase : int =negative_prompt_embeds.shape[1]
_UpperCAmelCase : List[str] =negative_prompt_embeds.repeat(1 , snake_case , 1)
_UpperCAmelCase : Optional[int] =negative_prompt_embeds.view(batch_size * num_images_per_prompt , snake_case , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_UpperCAmelCase : str =torch.cat([negative_prompt_embeds, prompt_embeds])
return prompt_embeds
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
if isinstance(snake_case , snake_case):
_UpperCAmelCase : Tuple =1
elif isinstance(snake_case , snake_case):
_UpperCAmelCase : int =len(snake_case)
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(snake_case)}")
_UpperCAmelCase : Optional[Any] =batch_size * num_images_per_prompt
_UpperCAmelCase : Union[str, Any] =guidance_scale > 1.0
_UpperCAmelCase : Any =self._encode_prompt(snake_case , snake_case , snake_case)
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case , snake_case) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(snake_case)}.")
# get the initial completely masked latents unless the user supplied it
_UpperCAmelCase : List[Any] =(batch_size, self.transformer.num_latent_pixels)
if latents is None:
_UpperCAmelCase : Optional[Any] =self.transformer.num_vector_embeds - 1
_UpperCAmelCase : Optional[int] =torch.full(snake_case , snake_case).to(self.device)
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'
f" {self.transformer.num_vector_embeds - 1} (inclusive).")
_UpperCAmelCase : Optional[Any] =latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(snake_case , device=self.device)
_UpperCAmelCase : int =self.scheduler.timesteps.to(self.device)
_UpperCAmelCase : Dict =latents
for i, t in enumerate(self.progress_bar(snake_case)):
# expand the sample if we are doing classifier free guidance
_UpperCAmelCase : Union[str, Any] =torch.cat([sample] * 2) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
_UpperCAmelCase : Optional[Any] =self.transformer(snake_case , encoder_hidden_states=snake_case , timestep=snake_case).sample
if do_classifier_free_guidance:
_UpperCAmelCase , _UpperCAmelCase : Dict =model_output.chunk(2)
_UpperCAmelCase : Dict =model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(snake_case , dim=1 , keepdim=snake_case)
_UpperCAmelCase : Any =self.truncate(snake_case , snake_case)
# remove `log(0)`'s (`-inf`s)
_UpperCAmelCase : int =model_output.clamp(-7_0)
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase : List[Any] =self.scheduler.step(snake_case , timestep=snake_case , sample=snake_case , generator=snake_case).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case , snake_case , snake_case)
_UpperCAmelCase : List[str] =self.vqvae.config.vq_embed_dim
_UpperCAmelCase : Optional[int] =(batch_size, self.transformer.height, self.transformer.width, embedding_channels)
_UpperCAmelCase : int =self.vqvae.quantize.get_codebook_entry(snake_case , shape=snake_case)
_UpperCAmelCase : str =self.vqvae.decode(snake_case , force_not_quantize=snake_case).sample
_UpperCAmelCase : str =(image / 2 + 0.5).clamp(0 , 1)
_UpperCAmelCase : Tuple =image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_UpperCAmelCase : Optional[int] =self.numpy_to_pil(snake_case)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Dict =torch.sort(snake_case , 1 , descending=snake_case)
_UpperCAmelCase : Dict =torch.exp(snake_case)
_UpperCAmelCase : str =sorted_p_x_0.cumsum(dim=1) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
_UpperCAmelCase : Optional[int] =torch.full_like(keep_mask[:, 0:1, :] , snake_case)
_UpperCAmelCase : Any =torch.cat((all_true, keep_mask) , dim=1)
_UpperCAmelCase : Dict =keep_mask[:, :-1, :]
_UpperCAmelCase : Any =keep_mask.gather(1 , indices.argsort(1))
_UpperCAmelCase : str =log_p_x_0.clone()
_UpperCAmelCase : Any =-torch.inf # -inf = log(0)
return rv
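# Toy standalone sketch of the log-probability truncation implemented by `truncate`
# above (a hypothetical helper, 2-D instead of 3-D for brevity): keep the most probable
# classes whose cumulative probability stays below `truncation_rate`, and zero out the
# rest in log space.
def truncate_sketch(log_p: torch.Tensor, truncation_rate: float) -> torch.Tensor:
    sorted_log_p, indices = torch.sort(log_p, dim=1, descending=True)
    keep = torch.exp(sorted_log_p).cumsum(dim=1) < truncation_rate
    # always keep at least the single most probable class
    keep = torch.cat((torch.ones_like(keep[:, :1]), keep[:, :-1]), dim=1)
    # undo the sort so the mask lines up with the original class order
    keep = keep.gather(1, indices.argsort(1))
    out = log_p.clone()
    out[~keep] = float("-inf")  # -inf = log(0)
    return out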
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_A = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize the accepted video formats to a batch of videos, each a list of frames."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"""Could not make batched video from {videos}""")
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str:
lowerCAmelCase_ = ObjectDetectionPipeline(model=_UpperCamelCase , image_processor=_UpperCamelCase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __a ( self , _UpperCamelCase , _UpperCamelCase ) -> Any:
lowerCAmelCase_ = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(_UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
_UpperCamelCase , {
"score": ANY(_UpperCamelCase ),
"label": ANY(_UpperCamelCase ),
"box": {"xmin": ANY(_UpperCamelCase ), "ymin": ANY(_UpperCamelCase ), "xmax": ANY(_UpperCamelCase ), "ymax": ANY(_UpperCamelCase )},
} , )
import datasets
lowerCAmelCase_ = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
lowerCAmelCase_ = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
lowerCAmelCase_ = object_detector(_UpperCamelCase , threshold=0.0 )
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) )
for outputs in batch_outputs:
self.assertGreater(len(_UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
_UpperCamelCase , {
"score": ANY(_UpperCamelCase ),
"label": ANY(_UpperCamelCase ),
"box": {"xmin": ANY(_UpperCamelCase ), "ymin": ANY(_UpperCamelCase ), "xmax": ANY(_UpperCamelCase ), "ymax": ANY(_UpperCamelCase )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __a ( self ) -> Optional[int]:
pass
@require_torch
def __a ( self ) -> str:
lowerCAmelCase_ = "hf-internal-testing/tiny-detr-mobilenetsv3"
lowerCAmelCase_ = AutoModelForObjectDetection.from_pretrained(_UpperCamelCase )
lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained(_UpperCamelCase )
lowerCAmelCase_ = ObjectDetectionPipeline(model=_UpperCamelCase , feature_extractor=_UpperCamelCase )
lowerCAmelCase_ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
lowerCAmelCase_ = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = "facebook/detr-resnet-50"
lowerCAmelCase_ = AutoModelForObjectDetection.from_pretrained(_UpperCamelCase )
lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained(_UpperCamelCase )
lowerCAmelCase_ = ObjectDetectionPipeline(model=_UpperCamelCase , feature_extractor=_UpperCamelCase )
lowerCAmelCase_ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
lowerCAmelCase_ = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __a ( self ) -> str:
lowerCAmelCase_ = "facebook/detr-resnet-50"
lowerCAmelCase_ = pipeline("object-detection" , model=_UpperCamelCase )
lowerCAmelCase_ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
lowerCAmelCase_ = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = 0.9985
lowerCAmelCase_ = "facebook/detr-resnet-50"
lowerCAmelCase_ = pipeline("object-detection" , model=_UpperCamelCase )
lowerCAmelCase_ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=_UpperCamelCase )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __a ( self ) -> List[str]:
lowerCAmelCase_ = "Narsil/layoutlmv3-finetuned-funsd"
lowerCAmelCase_ = 0.9993
lowerCAmelCase_ = pipeline("object-detection" , model=_UpperCamelCase , threshold=_UpperCamelCase )
lowerCAmelCase_ = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , )
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    """Output class for Stable Diffusion pipelines."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('''>=''', '''0.0.12''')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        """Output class for Flax-based Stable Diffusion pipelines."""

        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
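# The guarded imports above degrade gracefully: when a soft dependency (or a minimum
# version of it) is missing, dummy placeholder objects that raise a helpful error at
# use time are exported instead. A hypothetical, self-contained sketch of such a
# version probe (not the helpers used in this package; assumes `packaging` is
# installed):
def has_minimum_version(package: str, minimum: str) -> bool:
    try:
        from importlib.metadata import version

        from packaging.version import Version

        return Version(version(package)) >= Version(minimum)
    except Exception:
        return False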
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 218
| 0
|
def is_pentagonal(n: int) -> bool:
    """
    Returns True if n is pentagonal, False otherwise.
    """
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """
    Returns the smallest difference b = P_j - P_i such that both the sum and
    the difference of the pentagonal numbers P_i and P_j are pentagonal.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
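# --- Editor's note (illustrative, not from the original file): the float-based
# is_pentagonal() above can in principle misclassify very large n because of
# floating-point rounding. An exact integer variant using math.isqrt:
#
# from math import isqrt
#
# def is_pentagonal_exact(n: int) -> bool:
#     d = 1 + 24 * n
#     r = isqrt(d)
#     # n is pentagonal iff 24n + 1 is a perfect square (6k - 1)**2
#     return r * r == d and (1 + r) % 6 == 0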
| 159
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv2)
        self.blocks = blocks
        self.conv_out = nn.Conv(self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)
        # time
        self.time_proj = FlaxTimesteps(block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels)
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
        controlnet_down_blocks.append(controlnet_block)
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype)
            else:
                down_block = FlaxDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)
            down_blocks.append(down_block)
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)
            if not is_final_block:
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks
        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype)
        self.controlnet_mid_block = nn.Conv(mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale: float = 1.0, return_dict: bool = True, train: bool = False) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)
        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples
        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample)
        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample)
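# --- Editor's illustrative sketch (not part of the original file): creating a
# Flax ControlNet with its default configuration and initializing parameters
# via `init_weights`, assuming the module layout above.
#
# import jax
#
# controlnet = FlaxControlNetModel()
# params = controlnet.init_weights(rng=jax.random.PRNGKey(0))
# # a forward pass then takes (sample, timesteps, encoder_hidden_states, controlnet_cond)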
| 159
| 1
|
"""simple docstring"""
from math import sqrt
def _lowerCAmelCase ( lowercase_ = 1000000 ):
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(lowercase_ , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
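# --- Editor's illustrative cross-check (not from the original file): count the
# integer-shortest-path cuboids up to a small M by brute force. Per the problem
# statement, count_brute_force(99) should equal 1975.
#
# def count_brute_force(m: int) -> int:
#     count = 0
#     for c in range(1, m + 1):
#         for b in range(1, c + 1):
#             for a in range(1, b + 1):
#                 if sqrt((a + b) ** 2 + c**2).is_integer():
#                     count += 1
#     return count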
| 359
|
"""simple docstring"""
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    """
    MLFQ (multi-level feedback queue): every queue except the last runs the
    round-robin algorithm with its own time slice; the last queue runs
    first-come, first-served.
    """

    def __init__(self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue


if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:{mlfq.calculate_sequence_of_finish_queue()}")
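    # --- Editor's illustrative addition (not part of the original script):
    # summary statistics can be derived from the same per-process lists, e.g.
    #
    # waiting = MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])
    # turnaround = MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])
    # print(f"average waiting time: {sum(waiting) / len(waiting):.2f}")
    # print(f"average turnaround time: {sum(turnaround) / len(turnaround):.2f}")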
| 181
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    """
    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, sample_size)`):
            The hidden states output from the last layer of the model.
    """

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    """
    A 1D UNet model that takes a noisy sample and a timestep and returns a sample-shaped output.
    """
@register_to_config
    def __init__(self, sample_size: int = 65536, sample_rate: Optional[int] = None, in_channels: int = 2, out_channels: int = 2, extra_in_channels: int = 0, time_embedding_type: str = "fourier", flip_sin_to_cos: bool = True, use_timestep_embedding: bool = False, freq_shift: float = 0.0, down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), mid_block_type: Tuple[str] = "UNetMidBlock1D", out_block_type: str = None, block_out_channels: Tuple[int] = (32, 32, 64), act_fn: str = None, norm_num_groups: int = 8, layers_per_block: int = 1, downsample_each_block: bool = False):
'''simple docstring'''
super().__init__()
lowerCamelCase__: Tuple =sample_size
# time
if time_embedding_type == "fourier":
lowerCamelCase__: Optional[int] =GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=UpperCAmelCase_ , log=UpperCAmelCase_ , flip_sin_to_cos=UpperCAmelCase_)
lowerCamelCase__: int =2 * block_out_channels[0]
elif time_embedding_type == "positional":
lowerCamelCase__: str =Timesteps(
block_out_channels[0] , flip_sin_to_cos=UpperCAmelCase_ , downscale_freq_shift=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =block_out_channels[0]
if use_timestep_embedding:
lowerCamelCase__: List[Any] =block_out_channels[0] * 4
lowerCamelCase__: Tuple =TimestepEmbedding(
in_channels=UpperCAmelCase_ , time_embed_dim=UpperCAmelCase_ , act_fn=UpperCAmelCase_ , out_dim=block_out_channels[0] , )
lowerCamelCase__: List[str] =nn.ModuleList([])
lowerCamelCase__: List[str] =None
lowerCamelCase__: List[str] =nn.ModuleList([])
lowerCamelCase__: str =None
# down
lowerCamelCase__: Optional[Any] =in_channels
for i, down_block_type in enumerate(UpperCAmelCase_):
lowerCamelCase__: Optional[Any] =output_channel
lowerCamelCase__: List[str] =block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
lowerCamelCase__: Optional[Any] =i == len(UpperCAmelCase_) - 1
lowerCamelCase__: int =get_down_block(
UpperCAmelCase_ , num_layers=UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(UpperCAmelCase_)
# mid
lowerCamelCase__: Any =get_mid_block(
UpperCAmelCase_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=UpperCAmelCase_ , add_downsample=UpperCAmelCase_ , )
# up
lowerCamelCase__: Optional[Any] =list(reversed(UpperCAmelCase_))
lowerCamelCase__: Optional[Any] =reversed_block_out_channels[0]
if out_block_type is None:
lowerCamelCase__: Tuple =out_channels
else:
lowerCamelCase__: Optional[int] =block_out_channels[0]
for i, up_block_type in enumerate(UpperCAmelCase_):
lowerCamelCase__: int =output_channel
lowerCamelCase__: str =(
reversed_block_out_channels[i + 1] if i < len(UpperCAmelCase_) - 1 else final_upsample_channels
)
lowerCamelCase__: Union[str, Any] =i == len(UpperCAmelCase_) - 1
lowerCamelCase__: Any =get_up_block(
UpperCAmelCase_ , num_layers=UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =output_channel
# out
lowerCamelCase__: Optional[Any] =norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32)
lowerCamelCase__: Union[str, Any] =get_out_block(
out_block_type=UpperCAmelCase_ , num_groups_out=UpperCAmelCase_ , embed_dim=block_out_channels[0] , out_channels=UpperCAmelCase_ , act_fn=UpperCAmelCase_ , fc_dim=block_out_channels[-1] // 4 , )
    def forward(self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], return_dict: bool = True) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
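# --- Editor's illustrative sketch (not part of the original file): a minimal
# forward pass through the 1D UNet, assuming the diffusers module layout above.
#
# import torch
#
# model = UNet1DModel(sample_size=256, in_channels=2, out_channels=2)
# noisy = torch.randn(1, 2, 256)       # (batch, channels, length)
# out = model(noisy, timestep=10)      # UNet1DOutput
# denoised = out.sample                # same shape as the input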
| 10
|
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """
    A class replicating `BertConfig` with additional parameters for pruning/masking
    (pruning_method, mask_init, mask_scale).
    """

    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 10
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
"processing_layoutlmv2": ["LayoutLMv2Processor"],
"tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any =["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] =["LayoutLMv2FeatureExtractor"]
_lowercase : Union[str, Any] =["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_layoutlmv2"] = [
"LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv2ForQuestionAnswering",
"LayoutLMv2ForSequenceClassification",
"LayoutLMv2ForTokenClassification",
"LayoutLMv2Layer",
"LayoutLMv2Model",
"LayoutLMv2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
_lowercase : Optional[Any] =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
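# --- Editor's note (illustrative, not from the original file): the
# `_LazyModule` pattern above defers the heavy imports until an attribute is
# actually accessed; under TYPE_CHECKING the real imports run so static type
# checkers still see the concrete symbols. A minimal sketch with hypothetical
# names:
#
# _import_structure = {"configuration_foo": ["FooConfig"]}
# if TYPE_CHECKING:
#     from .configuration_foo import FooConfig
# else:
#     import sys
#     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)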
| 371
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wrapper around arrays of token sequences used by the distillation trainer."""
    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()
def __getitem__( self , __lowercase ) -> Any:
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__( self ) -> Dict:
"""simple docstring"""
return len(self.lengths )
    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")
    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")
    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
if not self.params.is_master:
return
logger.info(F'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
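# --- Editor's illustrative sketch (not part of the original file): the padding
# performed by batch_sequences(), in isolation. The values are made up.
#
# token_ids = [[5, 8, 2], [5, 9, 9, 9, 2]]
# pad_idx = 0
# max_len = max(len(t) for t in token_ids)
# padded = [t + [pad_idx] * (max_len - len(t)) for t in token_ids]
# # padded == [[5, 8, 2, 0, 0], [5, 9, 9, 9, 2]]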
| 266
| 0
|
"""simple docstring"""
def lowercase_ ( __UpperCAmelCase = 1000 ) -> int:
lowerCAmelCase__ : Optional[int] = -1
lowerCAmelCase__ : str = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
lowerCAmelCase__ : List[Any] = (n * n - 2 * a * n) // (2 * n - 2 * a)
lowerCAmelCase__ : Optional[int] = n - a - b
if c * c == (a * a + b * b):
lowerCAmelCase__ : List[Any] = a * b * c
if candidate >= product:
lowerCAmelCase__ : int = candidate
return product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 242
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt"
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_A = mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs)
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
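# --- Editor's note (illustrative, not part of the original example): the core
# of the `accumulate` pattern used above. Inside `accelerator.accumulate(model)`,
# accelerate roughly turns the intermediate `optimizer.step()`/`zero_grad()`
# calls into no-ops and skips gradient synchronization until every
# `gradient_accumulation_steps`-th batch.
#
# accelerator = Accelerator(gradient_accumulation_steps=4)
# for batch in train_dataloader:
#     with accelerator.accumulate(model):
#         loss = model(**batch).loss
#         accelerator.backward(loss)
#         optimizer.step()
#         optimizer.zero_grad()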
| 242
| 1
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
a_ = """▁"""
class AlbertTokenizer(PreTrainedTokenizer):
    """Construct an ALBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs=None, **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
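# --- Editor's illustrative usage sketch (not part of the original file):
#
# tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
# ids = tokenizer("The cat sat.")["input_ids"]   # [CLS] ... [SEP] ids
# tokens = tokenizer.tokenize("The cat sat.")    # SentencePiece pieces, e.g. "▁the", "▁cat", ...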
| 361
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
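# --- Editor's illustrative usage sketch (not part of the original file):
#
# config = DinatConfig(embed_dim=64, depths=[3, 4, 6, 5], out_features=["stage2", "stage3"])
# config.num_layers   # 4, derived from len(depths)
# config.hidden_size  # 512, i.e. 64 * 2 ** 3, the channel dim after the last stage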
| 291
| 0
|
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
| 59
|
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
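# --- Editor's illustrative invocation (not part of the original file), using
# the flags defined above; the script name and paths are placeholders.
#
# python convert_openai_checkpoint.py \
#     --openai_checkpoint_folder_path /path/to/openai/checkpoint \
#     --pytorch_dump_folder_path /path/to/output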
| 218
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 126
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize_and_center_crop=True, size=None, crop_pct=0.9, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[str] = PoolFormerImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)
    @property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ):
_lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , """do_resize_and_center_crop""" ) )
self.assertTrue(hasattr(a__ , """size""" ) )
self.assertTrue(hasattr(a__ , """crop_pct""" ) )
self.assertTrue(hasattr(a__ , """do_normalize""" ) )
self.assertTrue(hasattr(a__ , """image_mean""" ) )
self.assertTrue(hasattr(a__ , """image_std""" ) )
def __A ( self ):
_lowerCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
_lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def __A ( self ):
pass
def __A ( self ):
# Initialize image_processing
_lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
_lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase : List[str] = image_processing(a__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __A ( self ):
# Initialize image_processing
_lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray )
# Test not batched input
_lowerCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase : int = image_processing(a__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __A ( self ):
# Initialize image_processing
_lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
# Test not batched input
_lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase : List[str] = image_processing(a__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
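# Hedged sketch of the resize-then-center-crop arithmetic the tests above rely
# on (the real PoolFormerImageProcessor internals may differ): with crop_pct,
# the image is first scaled so its shortest edge is size / crop_pct, and the
# subsequent center crop of `size` therefore covers roughly crop_pct of it.
def _scaled_shortest_edge(size: int, crop_pct: float) -> int:
    return int(size / crop_pct)

assert _scaled_shortest_edge(30, 0.9) == 33  # then a 30x30 center crop is taken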
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
__A : Union[str, Any] = logging.get_logger(__name__)
__A : List[str] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__A : List[Any] = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
__A : Union[str, Any] = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
__A : List[Any] = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
__A : str = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
__A : Union[str, Any] = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
__A : List[str] = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
__A : str = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
__A : List[Any] = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
__A : Any = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class __snake_case ( __a):
"""simple docstring"""
lowercase = VOCAB_FILES_NAMES
lowercase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase = DPRContextEncoderTokenizer
class __snake_case ( __a):
"""simple docstring"""
lowercase = VOCAB_FILES_NAMES
lowercase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase = DPRQuestionEncoderTokenizer
__A : Tuple = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
__A : Union[str, Any] = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
__A : Optional[int] = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(__a)
class __snake_case :
"""simple docstring"""
def __call__( self : Any , lowerCamelCase : List[str] , lowerCamelCase : Optional[str] = None , lowerCamelCase : Optional[str] = None , lowerCamelCase : Union[bool, str] = False , lowerCamelCase : Union[bool, str] = False , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[Union[str, TensorType]] = None , lowerCamelCase : Optional[bool] = None , **lowerCamelCase : Optional[Any] , ) -> Any:
if titles is None and texts is None:
return super().__call__(
_A , padding=_A , truncation=_A , max_length=_A , return_tensors=_A , return_attention_mask=_A , **_A , )
elif titles is None or texts is None:
lowerCAmelCase_ : Any = titles if texts is None else texts
return super().__call__(
_A , _A , padding=_A , truncation=_A , max_length=_A , return_tensors=_A , return_attention_mask=_A , **_A , )
lowerCAmelCase_ : Optional[int] = titles if not isinstance(_A , _A ) else [titles]
lowerCAmelCase_ : Optional[int] = texts if not isinstance(_A , _A ) else [texts]
lowerCAmelCase_ : List[Any] = len(_A )
lowerCAmelCase_ : Dict = questions if not isinstance(_A , _A ) else [questions] * n_passages
assert len(_A ) == len(
            _A ), F'There should be as many titles as texts but got {len(_A )} titles and {len(_A )} texts.'
lowerCAmelCase_ : Dict = super().__call__(_A , _A , padding=_A , truncation=_A )['''input_ids''']
lowerCAmelCase_ : List[str] = super().__call__(_A , add_special_tokens=_A , padding=_A , truncation=_A )['''input_ids''']
lowerCAmelCase_ : List[str] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_A , _A )
]
}
if return_attention_mask is not False:
lowerCAmelCase_ : Optional[Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCAmelCase_ : List[str] = attention_mask
return self.pad(_A , padding=_A , max_length=_A , return_tensors=_A )
def __lowercase ( self : Dict , lowerCamelCase : BatchEncoding , lowerCamelCase : DPRReaderOutput , lowerCamelCase : int = 16 , lowerCamelCase : int = 64 , lowerCamelCase : int = 4 , ) -> int:
lowerCAmelCase_ : str = reader_input['''input_ids''']
lowerCAmelCase_ : List[Any] = reader_output[:3]
lowerCAmelCase_ : Tuple = len(_A )
lowerCAmelCase_ : Any = sorted(range(_A ) , reverse=_A , key=relevance_logits.__getitem__ )
lowerCAmelCase_ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
lowerCAmelCase_ : Any = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCAmelCase_ : Dict = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCAmelCase_ : Optional[Any] = sequence_ids.index(self.pad_token_id )
else:
lowerCAmelCase_ : Dict = len(_A )
lowerCAmelCase_ : List[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_A , top_spans=_A , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_A , start_index=_A , end_index=_A , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_A ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowercase ( self : Tuple , lowerCamelCase : List[int] , lowerCamelCase : List[int] , lowerCamelCase : int , lowerCamelCase : int , ) -> Any:
lowerCAmelCase_ : Optional[int] = []
for start_index, start_score in enumerate(_A ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowerCAmelCase_ : Tuple = sorted(_A , key=lambda lowerCamelCase : x[1] , reverse=_A )
lowerCAmelCase_ : int = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F'Wrong span indices: [{start_index}:{end_index}]'
lowerCAmelCase_ : int = end_index - start_index + 1
assert length <= max_answer_length, F'Span is too long: {length} > {max_answer_length}'
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_A ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__a)
class __snake_case ( __a ,__a):
"""simple docstring"""
lowercase = VOCAB_FILES_NAMES
lowercase = READER_PRETRAINED_VOCAB_FILES_MAP
lowercase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = READER_PRETRAINED_INIT_CONFIGURATION
lowercase = ['input_ids', 'attention_mask']
lowercase = DPRReaderTokenizer
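# Standalone sketch of the attention-mask construction in __call__ above: a 1
# for every real token, a 0 for padding. The ids below are made up; the only
# assumption is a BERT-style vocabulary where [PAD] has id 0.
pad_token_id = 0
encoded_inputs = {"input_ids": [[101, 2054, 102, 0, 0], [101, 2129, 2079, 102, 0]]}
attention_mask = [
    [int(input_id != pad_token_id) for input_id in input_ids]
    for input_ids in encoded_inputs["input_ids"]
]
assert attention_mask == [[1, 1, 1, 0, 0], [1, 1, 1, 1, 0]]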
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
UpperCAmelCase__ : str = R'''\w+[.]\d+'''
UpperCAmelCase__ : List[Any] = re.findall(lowerCAmelCase__ , lowerCAmelCase__ )
for pat in pats:
UpperCAmelCase__ : Union[str, Any] = key.replace(lowerCAmelCase__ , '''_'''.join(pat.split('''.''' ) ) )
return key
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
UpperCAmelCase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
UpperCAmelCase__ : Optional[int] = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
UpperCAmelCase__ : str = pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCAmelCase__ : Optional[Any] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
UpperCAmelCase__ : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCAmelCase__ : int = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
UpperCAmelCase__ : Optional[Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCAmelCase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCAmelCase__ : Optional[Any] = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=42 ) -> Tuple:
# Step 1: Convert pytorch tensor to numpy
UpperCAmelCase__ : int = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
UpperCAmelCase__ : Tuple = flax_model.init_weights(PRNGKey(lowerCAmelCase__ ) )
UpperCAmelCase__ : Optional[Any] = flatten_dict(lowerCAmelCase__ )
UpperCAmelCase__ : List[str] = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase__ : Optional[int] = rename_key(lowerCAmelCase__ )
UpperCAmelCase__ : str = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = rename_key_and_reshape_tensor(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
UpperCAmelCase__ : List[str] = jnp.asarray(lowerCAmelCase__ )
return unflatten_dict(lowerCAmelCase__ )
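# Toy check of the two reshaping rules implemented above, reusing the jnp
# import at the top of this file: conv kernels go from PyTorch's
# (out, in, kH, kW) layout to Flax's (kH, kW, in, out), and linear weights
# are simply transposed.
conv_weight = jnp.zeros((8, 3, 4, 4))                            # PyTorch conv layout
assert conv_weight.transpose(2, 3, 1, 0).shape == (4, 4, 3, 8)   # Flax layout
linear_weight = jnp.zeros((5, 10))                               # PyTorch linear layout (out, in)
assert linear_weight.T.shape == (10, 5)                          # Flax kernel layout (in, out)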
import numpy as np


class Cell:
    """A cell of the grid world: position, parent link and the A* scores g, h, f."""

    def __init__(self) -> None:
        self.position = (0, 0)
        self.parent = None
        self.g = 0  # cost from the start cell
        self.h = 0  # heuristic estimate to the goal
        self.f = 0  # g + h

    def __eq__(self, cell) -> bool:
        return self.position == cell.position

    def showcell(self) -> None:
        print(self.position)


class Gridworld:
    """A rectangular grid whose cells have up to eight neighbours."""

    def __init__(self, world_size=(5, 5)) -> None:
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self) -> None:
        print(self.w)

    def get_neigbours(self, cell):
        """Return the in-bounds neighbours of a cell, linked back to it as parent."""
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """A* search from start to goal; returns the path as a list of positions."""
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            if n in _closed:
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2  # squared euclidean heuristic
            n.f = n.h + n.g
            if any(c == n and c.f < n.f for c in _open):
                continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
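# Note on the heuristic: the squared euclidean distance used above can
# overestimate the true remaining cost on an 8-connected grid where diagonal
# steps also cost 1, so A* is not guaranteed an optimal path here. A hedged
# alternative that is admissible for this movement model is Chebyshev distance:
def chebyshev(a: tuple[int, int], b: tuple[int, int]) -> int:
    return max(abs(a[0] - b[0]), abs(a[1] - b[1]))

assert chebyshev((0, 0), (4, 4)) == 4  # four diagonal moves suffice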
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    """Return a size x size canvas with every cell dead."""
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    """Randomly set each cell of the canvas to alive or dead."""
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Advance the canvas by one generation and return the new canvas."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
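# Compact restatement of the Conway rules encoded in __judge_point above,
# useful as a quick sanity check: a live cell survives with two or three live
# neighbours, and a dead cell becomes alive with exactly three.
def next_state(alive_neighbours: int, alive: bool) -> bool:
    if alive:
        return alive_neighbours in (2, 3)
    return alive_neighbours == 3

assert next_state(1, True) is False   # underpopulation
assert next_state(3, False) is True   # reproduction
assert next_state(4, True) is False   # overpopulation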
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : str = Dict[str, Any]
lowerCamelCase : Dict = List[Prediction]
@add_end_docstrings(_lowerCAmelCase )
class __lowerCAmelCase (_lowerCAmelCase ):
'''simple docstring'''
def __init__(self : Dict , *UpperCamelCase : Any , **UpperCamelCase : List[Any] ):
'''simple docstring'''
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , '''vision''' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def UpperCamelCase__ (self : Optional[Any] , **kwargs : str ):
        '''simple docstring'''
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs['''threshold'''] = kwargs['''threshold''']
        return {}, {}, postprocess_kwargs
def __call__(self : List[str] , *UpperCamelCase : List[str] , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
return super().__call__(*_lowerCamelCase , **_lowerCamelCase )
def UpperCamelCase__ (self : Any , UpperCamelCase : Dict ):
'''simple docstring'''
lowercase__ = load_image(_lowerCamelCase )
lowercase__ = torch.IntTensor([[image.height, image.width]] )
lowercase__ = self.image_processor(images=[image] , return_tensors='''pt''' )
if self.tokenizer is not None:
lowercase__ = self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''' )
lowercase__ = target_size
return inputs
def UpperCamelCase__ (self : int , UpperCamelCase : str ):
'''simple docstring'''
lowercase__ = model_inputs.pop('''target_size''' )
lowercase__ = self.model(**_lowerCamelCase )
lowercase__ = outputs.__class__({'''target_size''': target_size, **outputs} )
if self.tokenizer is not None:
lowercase__ = model_inputs['''bbox''']
return model_outputs
def UpperCamelCase__ (self : int , UpperCamelCase : str , UpperCamelCase : Optional[Any]=0.9 ):
'''simple docstring'''
lowercase__ = model_outputs['''target_size''']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
lowercase__ ,lowercase__ = target_size[0].tolist()
def unnormalize(UpperCamelCase : Tuple ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
lowercase__ ,lowercase__ = model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            lowercase__ = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
lowercase__ = [unnormalize(_lowerCamelCase ) for bbox in model_outputs['''bbox'''].squeeze(0 )]
lowercase__ = ['''score''', '''label''', '''box''']
lowercase__ = [dict(zip(_lowerCamelCase , _lowerCamelCase ) ) for vals in zip(scores.tolist() , _lowerCamelCase , _lowerCamelCase ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
lowercase__ = self.image_processor.post_process_object_detection(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowercase__ = raw_annotations[0]
lowercase__ = raw_annotation['''scores''']
lowercase__ = raw_annotation['''labels''']
lowercase__ = raw_annotation['''boxes''']
lowercase__ = scores.tolist()
            lowercase__ = [self.model.config.id2label[label.item()] for label in labels]
lowercase__ = [self._get_bounding_box(_lowerCamelCase ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
lowercase__ = ['''score''', '''label''', '''box''']
lowercase__ = [
dict(zip(_lowerCamelCase , _lowerCamelCase ) )
for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''] )
]
return annotation
def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : "torch.Tensor" ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' )
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = box.int().tolist()
lowercase__ = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
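# Standalone sketch of the box unnormalization used in the LayoutLM branch of
# the postprocessing above: LayoutLM boxes live on a 0-1000 grid and are
# rescaled to pixel coordinates of the original image.
def unnormalize_box(bbox: list[int], width: int, height: int) -> list[float]:
    return [
        width * bbox[0] / 1000,
        height * bbox[1] / 1000,
        width * bbox[2] / 1000,
        height * bbox[3] / 1000,
    ]

assert unnormalize_box([100, 200, 500, 800], width=1000, height=500) == [
    100.0, 100.0, 500.0, 400.0,
]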
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowercase_ = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
lowercase_ = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
lowercase_ = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, homepage='''https://github.com/krishnap25/mauve''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': datasets.Value('''string''', id='''sequence''' ),
'''references''': datasets.Value('''string''', id='''sequence''' ),
} ), codebase_urls=['''https://github.com/krishnap25/mauve'''], reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
], )
def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : str, _lowerCamelCase : Optional[Any], _lowerCamelCase : Any=None, _lowerCamelCase : Tuple=None, _lowerCamelCase : Optional[Any]=None, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str="auto", _lowerCamelCase : Union[str, Any]=-1, _lowerCamelCase : List[str]=0.9, _lowerCamelCase : int=5, _lowerCamelCase : Tuple=5_00, _lowerCamelCase : Union[str, Any]="gpt2-large", _lowerCamelCase : int=-1, _lowerCamelCase : Union[str, Any]=10_24, _lowerCamelCase : Union[str, Any]=25, _lowerCamelCase : str=5, _lowerCamelCase : Any=True, _lowerCamelCase : Union[str, Any]=25, ):
'''simple docstring'''
__A = compute_mauve(
p_text=_lowerCamelCase, q_text=_lowerCamelCase, p_features=_lowerCamelCase, q_features=_lowerCamelCase, p_tokens=_lowerCamelCase, q_tokens=_lowerCamelCase, num_buckets=_lowerCamelCase, pca_max_data=_lowerCamelCase, kmeans_explained_var=_lowerCamelCase, kmeans_num_redo=_lowerCamelCase, kmeans_max_iter=_lowerCamelCase, featurize_model_name=_lowerCamelCase, device_id=_lowerCamelCase, max_text_length=_lowerCamelCase, divergence_curve_discretization_size=_lowerCamelCase, mauve_scaling_factor=_lowerCamelCase, verbose=_lowerCamelCase, seed=_lowerCamelCase, )
return out
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Primality check by 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Return n together with every left and right truncation of its digits."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Cheap filter: the leading and trailing three digits must themselves be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Return the first `count` primes that remain prime under truncation."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Sum of the only eleven two-sided truncatable primes (Project Euler 37)."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
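# Worked example with 3797, a canonical two-sided truncatable prime: removing
# digits from the left yields 797, 97, 7 and from the right 379, 37, 3, all of
# which are prime (relies on the helpers defined above).
assert list_truncated_nums(3797) == [3797, 797, 379, 97, 37, 7, 3]
assert all(is_prime(i) for i in list_truncated_nums(3797))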
def compute_ap(l):  # noqa: E741
    """Print every articulation point of the undirected graph `l` (adjacency list)."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        # note: uses vertex ids as discovery order, which assumes ids are
        # visited in increasing order along tree edges (true for this data)
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # the root of a DFS tree is an articulation point iff it has > 1 children
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
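# Brute-force cross-check of the Tarjan-style pass above, assuming the input
# graph is connected: a vertex is an articulation point iff deleting it leaves
# the remaining vertices disconnected. For the sample graph the points are
# 2, 3 and 5 (removing 3 isolates the leaf 4).
def is_articulation(graph: dict[int, list[int]], v: int) -> bool:
    nodes = [u for u in graph if u != v]
    seen = {nodes[0]}
    stack = [nodes[0]]
    while stack:
        cur = stack.pop()
        for nxt in graph[cur]:
            if nxt != v and nxt not in seen:
                seen.add(nxt)
                stack.append(nxt)
    return len(seen) < len(nodes)

assert [v for v in data if is_articulation(data, v)] == [2, 3, 5]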
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
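# Usage sketch for the helper above: a 10 mH inductor with a 1 uF capacitor
# resonates at f = 1 / (2 * pi * sqrt(L * C)), roughly 1591.5 Hz.
label, freq = resonant_frequency(10e-3, 1e-6)
assert label == "Resonant frequency"
assert abs(freq - 1591.5494) < 1e-3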
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : List[str] = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "sew-d"
def __init__( self , _a=32 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a=2 , _a=512 , _a=256 , _a=True , _a=True , _a=("p2c", "c2p") , _a="layer_norm" , _a="gelu_python" , _a=0.1 , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=0.02 , _a=1e-7 , _a=1e-5 , _a="group" , _a="gelu" , _a=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , _a=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _a=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _a=False , _a=128 , _a=16 , _a=True , _a=0.05 , _a=10 , _a=2 , _a=0.0 , _a=10 , _a=0 , _a="mean" , _a=False , _a=False , _a=256 , _a=0 , _a=1 , _a=2 , **_a , ):
"""simple docstring"""
super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a )
lowerCamelCase = hidden_size
lowerCamelCase = feat_extract_norm
lowerCamelCase = feat_extract_activation
lowerCamelCase = list(_a )
lowerCamelCase = list(_a )
lowerCamelCase = list(_a )
lowerCamelCase = conv_bias
lowerCamelCase = num_conv_pos_embeddings
lowerCamelCase = num_conv_pos_embedding_groups
lowerCamelCase = len(self.conv_dim )
lowerCamelCase = num_hidden_layers
lowerCamelCase = intermediate_size
lowerCamelCase = squeeze_factor
lowerCamelCase = max_position_embeddings
lowerCamelCase = position_buckets
lowerCamelCase = share_att_key
lowerCamelCase = relative_attention
lowerCamelCase = norm_rel_ebd
lowerCamelCase = list(_a )
lowerCamelCase = hidden_act
lowerCamelCase = num_attention_heads
lowerCamelCase = hidden_dropout
lowerCamelCase = attention_dropout
lowerCamelCase = activation_dropout
lowerCamelCase = feat_proj_dropout
lowerCamelCase = final_dropout
lowerCamelCase = layer_norm_eps
lowerCamelCase = feature_layer_norm_eps
lowerCamelCase = initializer_range
lowerCamelCase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'
f'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase = apply_spec_augment
lowerCamelCase = mask_time_prob
lowerCamelCase = mask_time_length
lowerCamelCase = mask_time_min_masks
lowerCamelCase = mask_feature_prob
lowerCamelCase = mask_feature_length
lowerCamelCase = mask_feature_min_masks
# ctc loss
lowerCamelCase = ctc_loss_reduction
lowerCamelCase = ctc_zero_infinity
# sequence classification
lowerCamelCase = use_weighted_layer_sum
lowerCamelCase = classifier_proj_size
@property
def _lowerCAmelCase ( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
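# Quick illustration of the property above, reusing the functools/operator
# imports at the top of this file: the feature extractor's overall
# downsampling factor is simply the product of the conv strides.
conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
assert functools.reduce(operator.mul, conv_stride, 1) == 320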
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
lowerCAmelCase__ : Optional[Any] ={
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def a__ ( A__ ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def a__ ( A__, A__ ):
if args.student_type == "roberta":
SCREAMING_SNAKE_CASE_ : List[str] = False
elif args.student_type == "gpt2":
SCREAMING_SNAKE_CASE_ : int = False
def a__ ( A__, A__ ):
if args.student_type == "roberta":
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
def a__ ( ):
SCREAMING_SNAKE_CASE_ : List[Any] = argparse.ArgumentParser(description='Training' )
parser.add_argument('--force', action='store_true', help='Overwrite dump_path if it already exists.' )
parser.add_argument(
'--dump_path', type=A__, required=A__, help='The output directory (log, checkpoints, parameters, etc.)' )
parser.add_argument(
'--data_file', type=A__, required=A__, help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.', )
parser.add_argument(
'--student_type', type=A__, choices=['distilbert', 'roberta', 'gpt2'], required=A__, help='The student type (DistilBERT, RoBERTa).', )
parser.add_argument('--student_config', type=A__, required=A__, help='Path to the student configuration.' )
parser.add_argument(
'--student_pretrained_weights', default=A__, type=A__, help='Load student initialization checkpoint.' )
parser.add_argument(
'--teacher_type', choices=['bert', 'roberta', 'gpt2'], required=A__, help='Teacher type (BERT, RoBERTa).' )
parser.add_argument('--teacher_name', type=A__, required=A__, help='The teacher model.' )
parser.add_argument('--temperature', default=2.0, type=A__, help='Temperature for the softmax temperature.' )
parser.add_argument(
'--alpha_ce', default=0.5, type=A__, help='Linear weight for the distillation loss. Must be >=0.' )
parser.add_argument(
'--alpha_mlm', default=0.0, type=A__, help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.', )
parser.add_argument('--alpha_clm', default=0.5, type=A__, help='Linear weight for the CLM loss. Must be >=0.' )
parser.add_argument('--alpha_mse', default=0.0, type=A__, help='Linear weight of the MSE loss. Must be >=0.' )
parser.add_argument(
'--alpha_cos', default=0.0, type=A__, help='Linear weight of the cosine embedding loss. Must be >=0.' )
parser.add_argument(
'--mlm', action='store_true', help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' )
parser.add_argument(
'--mlm_mask_prop', default=0.15, type=A__, help='Proportion of tokens for which we need to make a prediction.', )
parser.add_argument('--word_mask', default=0.8, type=A__, help='Proportion of tokens to mask out.' )
parser.add_argument('--word_keep', default=0.1, type=A__, help='Proportion of tokens to keep.' )
parser.add_argument('--word_rand', default=0.1, type=A__, help='Proportion of tokens to randomly replace.' )
parser.add_argument(
'--mlm_smoothing', default=0.7, type=A__, help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).', )
parser.add_argument('--token_counts', type=A__, help='The token counts in the data_file for MLM.' )
parser.add_argument(
'--restrict_ce_to_mask', action='store_true', help='If true, compute the distillation loss only the [MLM] prediction distribution.', )
parser.add_argument(
'--freeze_pos_embs', action='store_true', help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.', )
parser.add_argument(
'--freeze_token_type_embds', action='store_true', help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.', )
parser.add_argument('--n_epoch', type=A__, default=3, help='Number of pass on the whole dataset.' )
parser.add_argument('--batch_size', type=A__, default=5, help='Batch size (for each process).' )
parser.add_argument(
'--group_by_size', action='store_false', help='If true, group sequences that have similar length into the same batch. Default is true.', )
parser.add_argument(
'--gradient_accumulation_steps', type=A__, default=5_0, help='Gradient accumulation for larger training batches.', )
parser.add_argument('--warmup_prop', default=0.05, type=A__, help='Linear warmup proportion.' )
parser.add_argument('--weight_decay', default=0.0, type=A__, help='Weight decay if we apply some.' )
parser.add_argument('--learning_rate', default=5E-4, type=A__, help='The initial learning rate for Adam.' )
parser.add_argument('--adam_epsilon', default=1E-6, type=A__, help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm', default=5.0, type=A__, help='Max gradient norm.' )
parser.add_argument('--initializer_range', default=0.02, type=A__, help='Random initialization range.' )
parser.add_argument(
'--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit', )
parser.add_argument(
'--fp16_opt_level', type=A__, default='O1', help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
), )
parser.add_argument('--n_gpu', type=A__, default=1, help='Number of GPUs in the node.' )
parser.add_argument('--local_rank', type=A__, default=-1, help='Distributed training - Local rank' )
parser.add_argument('--seed', type=A__, default=5_6, help='Random seed' )
parser.add_argument('--log_interval', type=A__, default=5_0_0, help='Tensorboard logging interval.' )
parser.add_argument('--checkpoint_interval', type=A__, default=4_0_0_0, help='Checkpoint interval.' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parser.parse_args()
sanity_checks(A__ )
# ARGS #
init_gpu_params(A__ )
set_seed(A__ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    ' it. Use `--force` if you want to overwrite it.' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path, 'parameters.json' ), 'w' ) as f:
json.dump(vars(A__ ), A__, indent=4 )
git_log(args.dump_path )
SCREAMING_SNAKE_CASE_ : str = MODEL_CLASSES[args.student_type]
SCREAMING_SNAKE_CASE_ : Optional[int] = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
SCREAMING_SNAKE_CASE_ : List[str] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
SCREAMING_SNAKE_CASE_ : int = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.all_special_tokens.index(A__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
SCREAMING_SNAKE_CASE_ : int = special_tok_ids
SCREAMING_SNAKE_CASE_ : str = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file, 'rb' ) as fp:
SCREAMING_SNAKE_CASE_ : Tuple = pickle.load(A__ )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts, 'rb' ) as fp:
SCREAMING_SNAKE_CASE_ : int = pickle.load(A__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.maximum(A__, 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
SCREAMING_SNAKE_CASE_ : int = 0.0 # do not predict special tokens
SCREAMING_SNAKE_CASE_ : List[str] = torch.from_numpy(A__ )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : Optional[int] = LmSeqsDataset(params=A__, data=A__ )
logger.info('Data loader created.' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
SCREAMING_SNAKE_CASE_ : Any = student_config_class.from_pretrained(args.student_config )
SCREAMING_SNAKE_CASE_ : List[str] = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
SCREAMING_SNAKE_CASE_ : Dict = student_model_class.from_pretrained(args.student_pretrained_weights, config=A__ )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = student_model_class(A__ )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('Student loaded.' )
# TEACHER #
SCREAMING_SNAKE_CASE_ : Tuple = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=A__ )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(A__, A__ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(A__, A__ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Distiller(
params=A__, dataset=A__, token_probs=A__, student=A__, teacher=A__ )
distiller.train()
logger.info('Let\'s go get some drinks.' )
if __name__ == "__main__":
main()
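# Toy illustration of the MLM smoothing step above (counts ** -mlm_smoothing),
# reusing the numpy import at the top of this file: rare tokens receive larger
# masking scores and special tokens are zeroed out. The counts below are made up.
counts = np.array([0, 100, 10_000, 1])
token_probs = np.maximum(counts, 1) ** -0.7  # mlm_smoothing = 0.7
token_probs[0] = 0.0  # pretend id 0 is a special token: never predicted
assert token_probs.argmax() == 3  # the rarest real token scores highest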
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def a__ ( ):
raise RuntimeError('CUDA out of memory.' )
class ModelForTest(nn.Module ):
    """simple docstring"""
    def __init__( self ):
        """simple docstring"""
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )  # was nn.BatchNormad, which does not exist
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , x ):
        """simple docstring"""
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=1_2_8 )
        def mock_training_loop_function(batch_size ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
        mock_training_loop_function()
        self.assertListEqual(batch_sizes , [1_2_8, 6_4, 3_2, 1_6, 8] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=1_2_8 )
        def mock_training_loop_function(batch_size , arga ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga
        bs , arga = mock_training_loop_function('hello' )
        self.assertListEqual(batch_sizes , [1_2_8, 6_4, 3_2, 1_6, 8] )
        self.assertListEqual([bs, arga] , [8, 'hello'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=0 )
        def mock_training_loop_function(batch_size ):
            pass
        with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=1_6 )
        def mock_training_loop_function(batch_size ):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass
        with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=1_2_8 )
        def mock_training_loop_function(batch_size , arga , argb ):
            if batch_size != 8:
                raise_fake_out_of_memory()
        with self.assertRaises(TypeError ) as cm:
mock_training_loop_function(1_2_8 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
    def test_any_other_error( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=1_6 )
        def mock_training_loop_function(batch_size ):
            raise ValueError('Oops, we had an error!' )

        with self.assertRaises(ValueError ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
    def test_release_memory( self ):
"""simple docstring"""
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
        model = release_memory(model )
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
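def _example_find_executable_batch_size():
    # Usage sketch, not part of the original test file: the decorator retries the
    # wrapped function, halving `batch_size` after every "CUDA out of memory."
    # RuntimeError, so the caller invokes it with no argument. The toy body below
    # fakes the OOM condition instead of allocating real GPU memory.
    @find_executable_batch_size(starting_batch_size=1_2_8)
    def train(batch_size):
        if batch_size > 1_6:  # pretend anything above 16 exhausts GPU memory
            raise_fake_out_of_memory()
        return batch_size

    return train()  # returns 16 after retrying 128, 64 and 32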
| 162
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A_ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""sentencepiece"""]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['sentencepiece'] )
# The original module defines one identical placeholder per sentencepiece-backed
# tokenizer class; the repeated (byte-for-byte duplicate) definitions are omitted here.
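# Illustration, not part of the original module: `DummyObject` is a metaclass
# whose `__getattr__` calls `requires_backends(cls, cls._backends)`, so both
# instantiating the placeholder class above and touching a classmethod such as
# `from_pretrained` raise an ImportError telling the user to install
# sentencepiece, instead of an opaque AttributeError.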
| 126
|
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TOKENIZERS_PARALLELISM"] = """true"""
def get_basic_setup( accelerator : Accelerator , num_samples : int=8_2 , batch_size : int=1_6 ) ->Dict:
    set_seed(4_2 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=num_samples )
    dataloader = DataLoader(dset , batch_size=batch_size )
    model.to(accelerator.device )
    ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
    return model, ddp_model, dataloader
def get_dataloader( accelerator : Accelerator , use_longest : bool=False ) ->DataLoader:
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
    dataset = load_dataset('glue' , 'mrpc' , split='validation' )
    def tokenize_function(examples ):
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
        tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples ):
        if use_longest:
            return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
        return tokenizer.pad(examples , padding='max_length' , max_length=1_2_8 , return_tensors='pt' )
    return DataLoader(tokenized_datasets , shuffle=False , collate_fn=collate_fn , batch_size=1_6 )
def get_mrpc_setup( dispatch_batches , split_batches ) ->Any:
    accelerator = Accelerator(dispatch_batches=dispatch_batches , split_batches=split_batches )
    dataloader = get_dataloader(accelerator , not dispatch_batches )
    model = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased' , return_dict=True )
    ddp_model , ddp_dataloader = accelerator.prepare(model , dataloader )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions( model , dataloader , accelerator ) ->Dict:
    logits_and_targets = []
    for batch in dataloader:
        input , target = batch.values()
        with torch.no_grad():
            logit = model(input )
        logit , target = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    logits , targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit )
        targs.append(targ )
    logits , targs = torch.cat(logits ), torch.cat(targs )
    return logits, targs
def test_torch_metrics( accelerator : Accelerator , num_samples : int=8_2 , dispatch_batches : bool=False , split_batches : bool=False , batch_size : int=1_6 ) ->List[str]:
    model , ddp_model , dataloader = get_basic_setup(accelerator , num_samples , batch_size )
    logits , targs = generate_predictions(ddp_model , dataloader , accelerator )
    assert (
        len(logits ) == num_samples
    ), f"""Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits )}"""
def test_mrpc( dispatch_batches : bool = False , split_batches : bool = False ) ->str:
    metric = evaluate.load('glue' , 'mrpc' )
    setup , accelerator = get_mrpc_setup(dispatch_batches , split_batches )
    # First do baseline
    model , dataloader , device = setup['no']
    model.to(device )
    model.eval()
    for batch in dataloader:
        batch.to(device )
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=preds , references=batch['labels'] )
    baseline = metric.compute()
    # Then do distributed
    model , dataloader , device = setup['ddp']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        references = batch['labels']
        preds , references = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=preds , references=references )
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] , distributed[key] ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def main( ) ->str:
    accelerator = Accelerator(split_batches=False , dispatch_batches=False )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
                test_mrpc(dispatch_batches , split_batches )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches , dispatch_batches=dispatch_batches )
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
            test_torch_metrics(accelerator , 9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
    accelerator = Accelerator()
    test_torch_metrics(accelerator , 5_1_2 )
accelerator.state._reset_state()
def _mp_fn( index ) ->Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
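# Note, not part of the original script: with N processes, distributed samplers
# pad the dataset so every process sees the same number of batches, duplicating
# a few samples in the final batch. `accelerator.gather_for_metrics(...)` drops
# those duplicates after the all-gather, which is what both checks above rely
# on: the `len(logits) == num_samples` assertion in test_torch_metrics
# (including the perfectly-divisible 512-sample case) and the
# baseline-vs-distributed metric comparison in test_mrpc.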
| 126
| 1
|
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock( nn.Module ):
    def __init__( self , dim , num_attention_heads , attention_head_dim , dropout=0.0 , cross_attention_dim = None , activation_fn = "geglu" , num_embeds_ada_norm = None , attention_bias = False , only_cross_attention = False , double_self_attention = False , upcast_attention = False , norm_elementwise_affine = True , norm_type = "layer_norm" , final_dropout = False , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim , num_embeds_ada_norm )
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim , num_embeds_ada_norm )
        else:
            self.norm1 = nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
        self.attn1 = Attention(
            query_dim=dim , heads=num_attention_heads , dim_head=attention_head_dim , dropout=dropout , bias=attention_bias , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=upcast_attention , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim , num_embeds_ada_norm )
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
            )
            self.attn2 = Attention(
                query_dim=dim , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=num_attention_heads , dim_head=attention_head_dim , dropout=dropout , bias=attention_bias , upcast_attention=upcast_attention , )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None
# 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
        self.ff = FeedForward(dim , dropout=dropout , activation_fn=activation_fn , final_dropout=final_dropout )
        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0
    def set_chunk_feed_forward( self , chunk_size , dim ) -> Dict:
        """simple docstring"""
        self._chunk_size = chunk_size
        self._chunk_dim = dim
    def forward( self , hidden_states , attention_mask = None , encoder_hidden_states = None , encoder_attention_mask = None , timestep = None , cross_attention_kwargs = None , class_labels = None , ) -> Dict:
        """simple docstring"""
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states , timestep )
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states , gate_msa , shift_mlp , scale_mlp , gate_mlp = self.norm1(
                hidden_states , timestep , class_labels , hidden_dtype=hidden_states.dtype )
        else:
            norm_hidden_states = self.norm1(hidden_states )
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=attention_mask , **cross_attention_kwargs , )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1 ) * attn_output
        hidden_states = attn_output + hidden_states
# 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states , timestep ) if self.use_ada_layer_norm else self.norm2(hidden_states )
            )
            attn_output = self.attn2(
                norm_hidden_states , encoder_hidden_states=encoder_hidden_states , attention_mask=encoder_attention_mask , **cross_attention_kwargs , )
            hidden_states = attn_output + hidden_states
        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states )
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice ) for hid_slice in norm_hidden_states.chunk(num_chunks , dim=self._chunk_dim )] , dim=self._chunk_dim , )
        else:
            ff_output = self.ff(norm_hidden_states )
        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1 ) * ff_output
        hidden_states = ff_output + hidden_states
        return hidden_states
class FeedForward( nn.Module ):
    def __init__( self , dim , dim_out = None , mult = 4 , dropout = 0.0 , activation_fn = "geglu" , final_dropout = False , ) -> Any:
"""simple docstring"""
super().__init__()
        inner_dim = int(dim * mult )
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim , inner_dim )
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim , inner_dim , approximate='tanh' )
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim , inner_dim )
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim , inner_dim )
        self.net = nn.ModuleList([] )
        # project in
        self.net.append(act_fn )
        # project dropout
        self.net.append(nn.Dropout(dropout ) )
        # project out
        self.net.append(nn.Linear(inner_dim , dim_out ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout ) )
    def forward( self , hidden_states ) -> str:
        """simple docstring"""
        for module in self.net:
            hidden_states = module(hidden_states )
        return hidden_states
class GELU( nn.Module ):
    def __init__( self , dim_in , dim_out , approximate = "none" ) -> List[Any]:
        """simple docstring"""
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out )
        self.approximate = approximate
    def gelu( self , gate ) -> Optional[Any]:
        """simple docstring"""
        if gate.device.type != "mps":
            return F.gelu(gate , approximate=self.approximate )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) , approximate=self.approximate ).to(dtype=gate.dtype )
    def forward( self , hidden_states ) -> List[str]:
        """simple docstring"""
        hidden_states = self.proj(hidden_states )
        hidden_states = self.gelu(hidden_states )
        return hidden_states
class GEGLU( nn.Module ):
    def __init__( self , dim_in , dim_out ) -> str:
        """simple docstring"""
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out * 2 )
    def gelu( self , gate ) -> Tuple:
        """simple docstring"""
        if gate.device.type != "mps":
            return F.gelu(gate )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
    def forward( self , hidden_states ) -> Optional[int]:
        """simple docstring"""
        hidden_states , gate = self.proj(hidden_states ).chunk(2 , dim=-1 )
        return hidden_states * self.gelu(gate )
class ApproximateGELU( nn.Module ):
    def __init__( self , dim_in , dim_out ) -> Optional[int]:
        """simple docstring"""
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out )
    def forward( self , x ) -> int:
        """simple docstring"""
        x = self.proj(x )
        return x * torch.sigmoid(1.702 * x )
class AdaLayerNorm( nn.Module ):
    def __init__( self , embedding_dim , num_embeddings ) -> int:
        """simple docstring"""
        super().__init__()
        self.emb = nn.Embedding(num_embeddings , embedding_dim )
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim , embedding_dim * 2 )
        self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False )
    def forward( self , x , timestep ) -> str:
        """simple docstring"""
        emb = self.linear(self.silu(self.emb(timestep ) ) )
        scale , shift = torch.chunk(emb , 2 )
        x = self.norm(x ) * (1 + scale) + shift
        return x
class AdaLayerNormZero( nn.Module ):
    def __init__( self , embedding_dim , num_embeddings ) -> Tuple:
        """simple docstring"""
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings , embedding_dim )
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim , 6 * embedding_dim , bias=True )
        self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False , eps=1e-6 )
    def forward( self , x , timestep , class_labels , hidden_dtype=None ) -> int:
        """simple docstring"""
        emb = self.linear(self.silu(self.emb(timestep , class_labels , hidden_dtype=hidden_dtype ) ) )
        shift_msa , scale_msa , gate_msa , shift_mlp , scale_mlp , gate_mlp = emb.chunk(6 , dim=1 )
        x = self.norm(x ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm( nn.Module ):
    def __init__( self , embedding_dim , out_dim , num_groups , act_fn = None , eps = 1e-5 ) -> str:
        """simple docstring"""
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn )
        self.linear = nn.Linear(embedding_dim , out_dim * 2 )
    def forward( self , x , emb ) -> Union[str, Any]:
        """simple docstring"""
        if self.act:
            emb = self.act(emb )
        emb = self.linear(emb )
        emb = emb[:, :, None, None]
        scale , shift = emb.chunk(2 , dim=1 )
        x = F.group_norm(x , self.num_groups , eps=self.eps )
        x = x * (1 + scale) + shift
        return x
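# Usage sketch, not part of the original module: feed-forward chunking splits
# the sequence dimension into slices before the MLP, trading a Python-level
# loop for a lower peak activation size. Kept commented so importing this
# module stays side-effect free.
#
#   import torch
#   block = BasicTransformerBlock(dim=64, num_attention_heads=4, attention_head_dim=16)
#   block.set_chunk_feed_forward(chunk_size=8, dim=1)  # seq length must divide by 8
#   sample = torch.randn(2, 32, 64)                    # (batch, seq, dim)
#   out = block(sample)                                # same shape, chunked MLP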
| 353
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowerCAmelCase__ :Dict = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser( subparsers=None ) -> List[Any]:
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser('tpu-config' , description=_description )
    else:
        parser = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
        '--config_file' , type=str , default=None , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
        '--tpu_name' , default=None , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
        '--tpu_zone' , default=None , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
    pod_args = parser.add_argument_group('TPU Arguments' , 'Arguments for options run inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
        '--command_file' , default=None , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def tpu_command_launcher( args ) -> Any:
    '''simple docstring'''
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = 'git+https://github.com/huggingface/accelerate.git'
    elif args.accelerate_version == "latest":
        args.accelerate_version = 'accelerate -U'
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = F'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
    if args.command_file:
        with open(args.command_file , 'r' ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['cd /usr/share']
    if args.install_accelerate:
        new_cmd += [F'''pip install {args.accelerate_version}''']
    new_cmd += args.command
    args.command = '; '.join(new_cmd )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
        print(F'''Running {" ".join(cmd )}''' )
        return
    subprocess.run(cmd )
print('Successfully setup pod.' )
def main( ) -> Optional[Any]:
    '''simple docstring'''
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
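# Usage sketch, not part of the original module: typical invocation through the
# accelerate CLI, using the flags defined by the parser above. With --debug the
# generated `gcloud compute tpus tpu-vm ssh ...` command is printed instead of
# executed, which is the safest way to verify it.
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "echo hello from the pod" --install_accelerate --debug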
| 185
| 0
|
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def convert_classification( base_model_name , hf_config , downstream_dict ) -> Union[str, Any]:
    """simple docstring"""
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization( base_model_name , hf_config , downstream_dict ) -> Dict:
    """simple docstring"""
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector( base_model_name , hf_config , downstream_dict ) -> Optional[Any]:
    """simple docstring"""
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            F"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[F"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint( base_model_name , config_path , checkpoint_path , model_dump_path ) -> Any:
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    downstream_dict = checkpoint["Downstream"]
    hf_config = Wav2Vec2Config.from_pretrained(config_path )
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification" ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("ForAudioFrameClassification" ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("ForXVector" ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(F"S3PRL weights conversion is not supported for {arch}" )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
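# Usage sketch, not part of the original script: example invocation (the script
# filename and all paths below are hypothetical placeholders).
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./classifier_config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model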
| 340
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__a = logging.get_logger(__name__)
class OwlViTFeatureExtractor( OwlViTImageProcessor ):
    """simple docstring"""
    def __init__( self : Dict , *args : Optional[int] , **kwargs : Tuple ) -> None:
        """simple docstring"""
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
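# Usage note, not part of the original module: instantiating the deprecated
# class still works but emits the FutureWarning above; new code should build
# the image processor directly, e.g.:
#
#   from transformers import OwlViTImageProcessor
#   image_processor = OwlViTImageProcessor.from_pretrained("google/owlvit-base-patch32")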
| 145
| 0
|
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input('''Enter image url: ''').strip()
    print(f"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, '''html.parser''')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
    image_data = requests.get(image_url).content
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
| 362
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
'''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GraphormerForGraphClassification''',
'''GraphormerModel''',
'''GraphormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
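# Illustration, not part of the original module: `_LazyModule` replaces this
# package in `sys.modules`, so importing it only registers names and the heavy
# torch-backed submodule is loaded on first attribute access:
#
#   from transformers.models.graphormer import GraphormerConfig  # cheap, config only
#   from transformers.models.graphormer import GraphormerModel   # triggers the torch import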
| 34
| 0
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 146
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
logger.info("Training/evaluation parameters %s", args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
def model_infer( inputs , context , d_inputs , h_output0 , h_output1 , d_output0 , d_output1 , stream ):
    """simple docstring"""
    input_ids = np.asarray(inputs['''input_ids'''] , dtype=np.int32 )
    attention_mask = np.asarray(inputs['''attention_mask'''] , dtype=np.int32 )
    token_type_ids = np.asarray(inputs['''token_type_ids'''] , dtype=np.int32 )
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , stream )
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp ) for d_inp in d_inputs] + [int(d_output0 ), int(d_output1 )] , stream_handle=stream.handle )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0 , d_output0 , stream )
    cuda.memcpy_dtoh_async(h_output1 , d_output1 , stream )
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names
question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features( examples ):
    """simple docstring"""
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='''only_second''' if pad_on_right else '''only_first''' , max_length=max_seq_length , stride=args.doc_stride , return_overflowing_tokens=True , return_offsets_mapping=True , padding='''max_length''' , )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('''overflow_to_sample_mapping''' )
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples['''input_ids'''] ) ):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i )
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples['''id'''][sample_index] )
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] )
        ]
    return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function( examples , features , predictions , stage="eval" ):
    """simple docstring"""
    predictions = postprocess_qa_predictions(
        examples=examples , features=features , predictions=predictions , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=stage , )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]
    references = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions , label_ids=references )
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes( binding ):
        """simple docstring"""
        return trt.volume(engine.get_binding_shape(binding ) ) * engine.get_binding_dtype(binding ).itemsize
    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(f" Num examples = {len(eval_dataset)}")
logger.info(f" Batch size = {args.per_device_eval_batch_size}")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
for step, batch in enumerate(eval_dataloader):
        outputs , infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits , end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
logger.info("Total Number of Inference = %d", niter)
__UpperCamelCase : int = post_processing_function(eval_examples, eval_dataset, all_preds)
__UpperCamelCase : Union[str, Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
| 146
| 1
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride)

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}")

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 119
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: bool = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
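# Illustrative usage sketch (added example, not part of the original module; the
# dummy array simply stands in for a real photo):
if __name__ == "__main__":
    processor = BlipImageProcessor()
    dummy_image = np.zeros((480, 640, 3), dtype=np.uint8)
    batch = processor(images=dummy_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 384, 384) with the defaults above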
| 119
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Salesforce/blip-vqa-base''': '''https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json''',
'''Salesforce/blip-vqa-capfit-large''': (
'''https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-base''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-large''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'''
),
'''Salesforce/blip-itm-base-coco''': '''https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-large-coco''': '''https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-base-flikr''': '''https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json''',
'''Salesforce/blip-itm-large-flikr''': (
'''https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'''
),
}
class BlipTextConfig(PretrainedConfig):
    model_type = "blip_text_model"

    def __init__(self, vocab_size=30524, hidden_size=768, encoder_hidden_size=768, intermediate_size=3072, projection_dim=768, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=512, hidden_act="gelu", layer_norm_eps=1e-12, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, bos_token_id=30522, eos_token_id=2, pad_token_id=0, sep_token_id=102, is_decoder=True, use_cache=True, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class BlipVisionConfig(PretrainedConfig):
    model_type = "blip_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, image_size=384, patch_size=16, hidden_act="gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=1e-10, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class BlipConfig(PretrainedConfig):
    model_type = "blip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, image_text_hidden_size=256, **kwargs):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        self.text_config.encoder_hidden_size = self.vision_config.hidden_size

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
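# Illustrative sketch (added example): composing a BlipConfig from hand-built
# sub-configs via the classmethod above. The sizes are arbitrary sample values,
# not those of a released checkpoint.
if __name__ == "__main__":
    text_config = BlipTextConfig(vocab_size=30524, hidden_size=768)
    vision_config = BlipVisionConfig(hidden_size=768, image_size=384)
    config = BlipConfig.from_text_vision_configs(text_config, vision_config)
    print(config.to_dict()["model_type"])  # "blip"
    print(config.text_config.encoder_hidden_size)  # 768, copied from the vision tower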
| 310
|
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
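# Illustrative usage sketch (added example; `demo_maze` is a made-up input where 0
# marks a free cell and 1 a blocked one):
def demo_solve_maze() -> None:
    demo_maze = [
        [0, 1, 0, 0],
        [0, 0, 0, 1],
        [1, 0, 1, 0],
        [1, 0, 0, 0],
    ]
    assert solve_maze(demo_maze)  # prints the 0/1 path matrix and returns True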
if __name__ == "__main__":
import doctest
doctest.testmod()
| 162
| 0
|
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
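# Illustrative sketch of the behaviour the two tests above exercise (added example;
# `is_locked` is assumed to exist, as in the upstream `filelock` package):
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        lock = FileLock(os.path.join(tmp, "demo.lock"))
        with lock.acquire():
            assert lock.is_locked  # a second acquire() on this path would time out
        assert not lock.is_locked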
| 355
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 97
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""",
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(self, vocab_size=250880, hidden_size=64, n_layer=2, n_head=8, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=1, eos_token_id=2, apply_residual_connection_post_layernorm=False, hidden_dropout=0.0, attention_dropout=0.0, pretraining_tp=1, slow_but_exact=False, **kwargs):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
    def generate_dummy_inputs(self, tokenizer: "PreTrainedTokenizer", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
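# Illustrative sketch (added example): wiring the two classes together and inspecting
# the dynamic axes the ONNX exporter would use; all values are the defaults above.
if __name__ == "__main__":
    onnx_config = BloomOnnxConfig(BloomConfig())
    print(onnx_config.inputs)  # input_ids / attention_mask with batch and sequence axes
    print(onnx_config.num_layers, onnx_config.num_attention_heads)  # 2 8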
| 284
|
'''simple docstring'''
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
| 185
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
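# Illustrative sketch (added example): the adapter-specific fields restored above are
# plain attributes on the config, with one adapter set per entry in `languages`.
if __name__ == "__main__":
    config = XmodConfig(languages=("en_XX", "de_DE"), adapter_reduction_factor=4)
    print(config.languages)  # ['en_XX', 'de_DE']
    print(config.adapter_reduction_factor)  # 4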
| 370
|
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
__UpperCamelCase : Union[str, Any] = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_35_15_63,
'''num_examples''': 1_00_00,
},
{
'''name''': '''validation''',
'''num_bytes''': 23_84_18,
'''num_examples''': 10_00,
},
] , download_size=3_94_06_80 , dataset_size=2_58_99_81 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 74
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 184
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
    'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=24_000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm", kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode="reflect", compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1_024, codebook_dim=None, use_conv_shortcut=True, **kwargs):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
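# Illustrative sketch (added example): the derived properties follow directly from
# the formulas above; the numbers below use only the default values.
if __name__ == "__main__":
    config = EncodecConfig()
    print(config.frame_rate)      # ceil(24000 / prod([8, 5, 4, 2])) = 75
    print(config.num_quantizers)  # int(1000 * 24.0 // (75 * 10)) = 32
    print(config.chunk_length)    # None while chunk_length_s is unset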
| 34
| 0
|
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
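# Illustrative usage sketch (added example): in practice this class is reached through
# the `pipeline` factory; the CLIP checkpoint and image URL named here are example
# choices, not requirements of the class above.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    predictions = classifier(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["a photo of cats", "a photo of a dog"],
    )
    print(predictions)  # list of {"score": ..., "label": ...}, highest score first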
| 306
|
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
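# Illustrative checks (added example): trial division returns factors in ascending
# order with multiplicity, and a prime returns itself.
if __name__ == "__main__":
    assert prime_factors(100) == [2, 2, 5, 5]
    assert prime_factors(97) == [97]
    assert prime_factors(2 * 3 * 7 * 7) == [2, 3, 7, 7]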
| 306
| 1
|
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search that returns vertices in reverse finish order."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search over the reversed graph, collecting one strongly connected component."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: one DFS pass for finish order, one pass over the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
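# Illustrative check (added example): on `test_graph_1` above, vertices 0, 1 and 2
# form a cycle, while 3 and 4 are their own components (ordering within the output
# depends on DFS order, hence the sort before comparing).
if __name__ == "__main__":
    components = strongly_connected_components(test_graph_1)
    assert sorted(sorted(c) for c in components) == [[0, 1, 2], [3], [4]]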
| 119
|
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"""converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}""" )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"""`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}""" )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
__UpperCAmelCase = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
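# Illustrative invocation (the checkpoint name and output directory below are
# placeholders, not values taken from the original script):
#
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py \
#       bart.large.cnn /tmp/bart-large-cnn --hf_config facebook/bart-large-cnn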
| 119
| 1
|
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __SCREAMING_SNAKE_CASE ( __a ):
'''simple docstring'''
def snake_case ( self : Dict, lowerCamelCase : str )-> Union[str, Any]:
with open(UpperCamelCase__, encoding='''utf-8''' ) as input_file:
lowerCamelCase__ : Union[str, Any] =re.compile(r'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
lowerCamelCase__ : Optional[Any] =input_file.read()
lowerCamelCase__ : Union[str, Any] =regexp.search(UpperCamelCase__ )
return match
def snake_case ( self : Optional[int], lowerCamelCase : str )-> int:
with open(UpperCamelCase__, encoding='''utf-8''' ) as input_file:
lowerCamelCase__ : Optional[Any] =re.compile(r'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''', re.DOTALL )
lowerCamelCase__ : Optional[int] =input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
lowerCamelCase__ : str =regexp.finditer(UpperCamelCase__ )
lowerCamelCase__ : str =[match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def snake_case ( self : Any )-> Optional[int]:
lowerCamelCase__ : Union[str, Any] =Path('''./datasets''' )
lowerCamelCase__ : Any =list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(UpperCamelCase__ ) ):
raise AssertionError(F'''open(...) must use utf-8 encoding in {dataset}''' )
def snake_case ( self : Tuple )-> Any:
lowerCamelCase__ : Tuple =Path('''./datasets''' )
lowerCamelCase__ : str =list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_print_statements(str(UpperCamelCase__ ) ):
raise AssertionError(F'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
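# A quick illustration of what the encoding regex above catches (added as a
# sketch; the sample strings are made up):
#
#   >>> import re
#   >>> pattern = r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)"
#   >>> bool(re.search(pattern, "data = open(path)"))                    # missing encoding
#   True
#   >>> bool(re.search(pattern, 'data = open(path, encoding="utf-8")'))  # compliant
#   False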
| 369
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : Union[str, Any] )-> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case ( self : str )-> Any:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-canny''', from_pt=lowerCamelCase, dtype=jnp.bfloataa )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', controlnet=lowerCamelCase, from_pt=lowerCamelCase, dtype=jnp.bfloataa )
lowerCamelCase__ : Optional[int] =controlnet_params
lowerCamelCase__ : Dict ='''bird'''
lowerCamelCase__ : List[str] =jax.device_count()
lowerCamelCase__ : Optional[Any] =pipe.prepare_text_inputs([prompts] * num_samples )
lowerCamelCase__ : Dict =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
lowerCamelCase__ : List[Any] =pipe.prepare_image_inputs([canny_image] * num_samples )
lowerCamelCase__ : Optional[int] =jax.random.PRNGKey(0 )
lowerCamelCase__ : Dict =jax.random.split(lowerCamelCase, jax.device_count() )
lowerCamelCase__ : Tuple =replicate(lowerCamelCase )
lowerCamelCase__ : Tuple =shard(lowerCamelCase )
lowerCamelCase__ : Optional[int] =shard(lowerCamelCase )
lowerCamelCase__ : Tuple =pipe(
prompt_ids=lowerCamelCase, image=lowerCamelCase, params=lowerCamelCase, prng_seed=lowerCamelCase, num_inference_steps=50, jit=lowerCamelCase, ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
lowerCamelCase__ : Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase__ : Any =images[0, 253:256, 253:256, -1]
lowerCamelCase__ : Optional[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase__ : Dict =jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def snake_case ( self : Optional[int] )-> Optional[int]:
lowerCamelCase__ , lowerCamelCase__ : Dict =FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-openpose''', from_pt=lowerCamelCase, dtype=jnp.bfloataa )
lowerCamelCase__ , lowerCamelCase__ : List[Any] =FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', controlnet=lowerCamelCase, from_pt=lowerCamelCase, dtype=jnp.bfloataa )
lowerCamelCase__ : Optional[Any] =controlnet_params
lowerCamelCase__ : int ='''Chef in the kitchen'''
lowerCamelCase__ : Optional[Any] =jax.device_count()
lowerCamelCase__ : Any =pipe.prepare_text_inputs([prompts] * num_samples )
lowerCamelCase__ : Any =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
lowerCamelCase__ : List[Any] =pipe.prepare_image_inputs([pose_image] * num_samples )
lowerCamelCase__ : Tuple =jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] =jax.random.split(lowerCamelCase, jax.device_count() )
lowerCamelCase__ : int =replicate(lowerCamelCase )
lowerCamelCase__ : List[Any] =shard(lowerCamelCase )
lowerCamelCase__ : int =shard(lowerCamelCase )
lowerCamelCase__ : Tuple =pipe(
prompt_ids=lowerCamelCase, image=lowerCamelCase, params=lowerCamelCase, prng_seed=lowerCamelCase, num_inference_steps=50, jit=lowerCamelCase, ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
lowerCamelCase__ : Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase__ : List[str] =images[0, 253:256, 253:256, -1]
lowerCamelCase__ : int =jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase__ : Any =jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 272
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowercase : str = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class A ( unittest.TestCase ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=18 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=400 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , ) -> List[str]:
"""simple docstring"""
A : Any = size if size is not None else {'''height''': 20, '''width''': 20}
A : List[Any] = parent
A : Dict = batch_size
A : Optional[Any] = num_channels
A : str = image_size
A : List[Any] = min_resolution
A : Optional[int] = max_resolution
A : Union[str, Any] = size
A : Tuple = do_normalize
A : Tuple = do_convert_rgb
A : Union[str, Any] = [512, 1024, 2048, 4096]
A : Optional[int] = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : str = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
A : List[str] = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ).convert('''RGB''' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class A ( __snake_case , unittest.TestCase ):
__magic_name__ = PixaStructImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Optional[int] = PixaStructImageProcessingTester(self )
@property
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , '''do_convert_rgb''' ) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : Dict = self.image_processor_tester.prepare_dummy_image()
A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
A : int = 2048
A : Tuple = image_processor(SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_606 ) , atol=1e-3 , rtol=1e-3 ) )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
A : str = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
A : Optional[int] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
A : Union[str, Any] = image_processor(
SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
A : List[Any] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
A : Optional[int] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(SCREAMING_SNAKE_CASE ):
A : Any = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE ).flattened_patches
A : Any = '''Hello'''
A : Any = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE , header_text=SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
A : Optional[int] = image_processor(
SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE , header_text=SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray )
A : Tuple = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
A : List[Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
A : str = image_processor(
SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
A : int = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
A : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
A : Dict = image_processor(
SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class A ( __snake_case , unittest.TestCase ):
__magic_name__ = PixaStructImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Union[str, Any] = PixaStructImageProcessingTester(self , num_channels=4 )
A : Optional[Any] = 3
@property
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , '''do_convert_rgb''' ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
A : int = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
A : Any = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
A : int = image_processor(
SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
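# Note on the expected dimensions in the tests above: Pix2Struct flattens each
# image patch of shape (patch_height, patch_width, num_channels) into one row
# and prepends the patch's row and column index, which is where the "+ 2" in
# expected_hidden_dim comes from. This is an inference from the shapes being
# asserted here, not a statement taken from the original file.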
| 3
|
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """
    Change the brightness of a PIL Image to a given level.
    """

    def brightness(c: int) -> float:
        """Fundamental transformation applied to every pixel value."""
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
    bright_img.save("image_data/lena_brightness.png", format="png")
| 97
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
a__ : Dict = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
a__ : Any = TaTokenizerFast
a__ : Tuple = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : str = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
a__ : str = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
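# With the lazy module installed in sys.modules, downstream code imports from
# the package root as usual and only pays the import cost on first attribute
# access (illustrative usage; assumes PyTorch is installed):
#
#   from transformers import MT5Config, MT5ForConditionalGeneration
#   model = MT5ForConditionalGeneration(MT5Config())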
| 195
|
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
if digit_amount > 0:
return round(number - int(lowerCAmelCase_ ) , lowerCAmelCase_ )
return number - int(lowerCAmelCase_ )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.3_45, 1))
print(decimal_isolate(35.3_45, 2))
print(decimal_isolate(35.3_45, 3))
print(decimal_isolate(-14.7_89, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.1_23, 1))
print(decimal_isolate(-14.1_23, 2))
print(decimal_isolate(-14.1_23, 3))
| 195
| 1
|
"""simple docstring"""
__lowercase = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 1000000,
"gigajoule": 1000000000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 3600000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 4186800.00,
"electronvolt": 1.602176634e-19,
"britishthermalunit_it": 1055.05585,
"footpound": 1.35_58_18,
}
def lowercase ( A_ , A_ , A_ )-> float:
'''simple docstring'''
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
a : Optional[int] = (
F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
F'''Valid values are: {", ".join(A_ )}'''
)
raise ValueError(A_ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
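# Example conversions, computed from the table above (illustrative):
#
#   >>> energy_conversion("joule", "kilojoule", 1_000)
#   1.0
#   >>> energy_conversion("kilowatthour", "joule", 1)
#   3600000.0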
| 40
|
"""simple docstring"""
from string import ascii_uppercase
_lowercase = {char: i for i, char in enumerate(ascii_uppercase)}
_lowercase = dict(enumerate(ascii_uppercase))
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = len(snake_case__ )
A = 0
while True:
if x == i:
A = 0
if len(snake_case__ ) == len(snake_case__ ):
break
key += key[i]
i += 1
return key
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = ''
A = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
A = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = ''
A = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
A = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def _snake_case ( ):
A = 'THE GERMAN ATTACK'
A = 'SECRET'
A = generate_key(snake_case__ , snake_case__ )
A = cipher_text(snake_case__ , snake_case__ )
print(F'Encrypted Text = {s}' )
print(F'Original Text = {original_text(snake_case__ , snake_case__ )}' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 74
| 0
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class a ( _lowerCamelCase ):
def __init__( self : Dict ):
# test for the above condition
self.test()
def A_ ( self : int ):
snake_case_ = 0
snake_case_ = False
while not completed:
if counter == 1:
self.reset()
snake_case_ = self.advance()
if not self.does_advance(lowercase_ ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
snake_case_ ,snake_case_ ,snake_case_ = self.update(lowercase_ )
counter += 1
if counter > 1_0000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def A_ ( self : Optional[Any] ):
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def A_ ( self : Tuple , lowercase_ : int ):
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def A_ ( self : str , lowercase_ : int ):
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def A_ ( self : Any ):
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def A_ ( self : Tuple ):
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def A_ ( self : Tuple , lowercase_ : int=False ):
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class a ( _lowerCamelCase ):
def __init__( self : Optional[int] , lowercase_ : List[int] ):
super(lowercase_ , self ).__init__()
if not isinstance(lowercase_ , lowercase_ ) or len(lowercase_ ) == 0:
raise ValueError(F"`token_ids` has to be a non-empty list, but is {token_ids}." )
if any((not isinstance(lowercase_ , lowercase_ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}." )
snake_case_ = token_ids
snake_case_ = len(self.token_ids )
snake_case_ = -1 # the index of the currently fulfilled step
snake_case_ = False
def A_ ( self : Optional[int] ):
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def A_ ( self : Optional[int] , lowercase_ : int ):
if not isinstance(lowercase_ , lowercase_ ):
raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(lowercase_ )}" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def A_ ( self : List[str] , lowercase_ : int ):
if not isinstance(lowercase_ , lowercase_ ):
raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(lowercase_ )}" )
snake_case_ = False
snake_case_ = False
snake_case_ = False
if self.does_advance(lowercase_ ):
self.fulfilled_idx += 1
snake_case_ = True
if self.fulfilled_idx == (self.seqlen - 1):
snake_case_ = True
snake_case_ = completed
else:
# failed to make progress.
snake_case_ = True
self.reset()
return stepped, completed, reset
def A_ ( self : Optional[Any] ):
snake_case_ = False
snake_case_ = 0
def A_ ( self : List[str] ):
return self.seqlen - (self.fulfilled_idx + 1)
def A_ ( self : int , lowercase_ : str=False ):
snake_case_ = PhrasalConstraint(self.token_ids )
if stateful:
snake_case_ = self.seqlen
snake_case_ = self.fulfilled_idx
snake_case_ = self.completed
return new_constraint
class a :
def __init__( self : Tuple , lowercase_ : List[List[int]] , lowercase_ : str=True ):
snake_case_ = max([len(lowercase_ ) for one in nested_token_ids] )
snake_case_ = {}
for token_ids in nested_token_ids:
snake_case_ = root
for tidx, token_id in enumerate(lowercase_ ):
if token_id not in level:
snake_case_ = {}
snake_case_ = level[token_id]
if no_subsets and self.has_subsets(lowercase_ , lowercase_ ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
F" {nested_token_ids}." )
snake_case_ = root
def A_ ( self : Optional[int] , lowercase_ : List[Any] ):
snake_case_ = self.trie
for current_token in current_seq:
snake_case_ = start[current_token]
snake_case_ = list(start.keys() )
return next_tokens
def A_ ( self : Optional[Any] , lowercase_ : Tuple ):
snake_case_ = self.next_tokens(lowercase_ )
return len(lowercase_ ) == 0
def A_ ( self : str , lowercase_ : List[str] ):
snake_case_ = list(root.values() )
if len(lowercase_ ) == 0:
return 1
else:
return sum([self.count_leaves(lowercase_ ) for nn in next_nodes] )
def A_ ( self : Dict , lowercase_ : Optional[int] , lowercase_ : List[str] ):
snake_case_ = self.count_leaves(lowercase_ )
return len(lowercase_ ) != leaf_count
class a ( _lowerCamelCase ):
def __init__( self : Tuple , lowercase_ : List[List[int]] ):
super(lowercase_ , self ).__init__()
if not isinstance(lowercase_ , lowercase_ ) or len(lowercase_ ) == 0:
raise ValueError(F"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}." )
if any(not isinstance(lowercase_ , lowercase_ ) for token_ids in nested_token_ids ):
raise ValueError(F"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}." )
if any(
any((not isinstance(lowercase_ , lowercase_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." )
snake_case_ = DisjunctiveTrie(lowercase_ )
snake_case_ = nested_token_ids
snake_case_ = self.trie.max_height
snake_case_ = []
snake_case_ = False
def A_ ( self : str ):
snake_case_ = self.trie.next_tokens(self.current_seq )
if len(lowercase_ ) == 0:
return None
else:
return token_list
def A_ ( self : str , lowercase_ : int ):
if not isinstance(lowercase_ , lowercase_ ):
raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(lowercase_ )}" )
snake_case_ = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def A_ ( self : List[Any] , lowercase_ : int ):
if not isinstance(lowercase_ , lowercase_ ):
raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(lowercase_ )}" )
snake_case_ = False
snake_case_ = False
snake_case_ = False
if self.does_advance(lowercase_ ):
self.current_seq.append(lowercase_ )
snake_case_ = True
else:
snake_case_ = True
self.reset()
snake_case_ = self.trie.reached_leaf(self.current_seq )
snake_case_ = completed
return stepped, completed, reset
def A_ ( self : Optional[Any] ):
snake_case_ = False
snake_case_ = []
def A_ ( self : str ):
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def A_ ( self : Optional[Any] , lowercase_ : Tuple=False ):
snake_case_ = DisjunctiveConstraint(self.token_ids )
if stateful:
snake_case_ = self.seqlen
snake_case_ = self.current_seq
snake_case_ = self.completed
return new_constraint
class a :
def __init__( self : List[Any] , lowercase_ : List[Constraint] ):
snake_case_ = constraints
# max # of steps required to fulfill a given constraint
snake_case_ = max([c.seqlen for c in constraints] )
snake_case_ = len(lowercase_ )
snake_case_ = False
self.init_state()
def A_ ( self : Optional[int] ):
snake_case_ = []
snake_case_ = None
snake_case_ = [constraint.copy(stateful=lowercase_ ) for constraint in self.constraints]
def A_ ( self : Optional[Any] ):
snake_case_ = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def A_ ( self : Any ):
snake_case_ = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
snake_case_ = constraint.advance()
if isinstance(lowercase_ , lowercase_ ):
token_list.append(lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
token_list.extend(lowercase_ )
else:
snake_case_ = self.inprogress_constraint.advance()
if isinstance(lowercase_ , lowercase_ ):
token_list.append(lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
token_list.extend(lowercase_ )
if len(lowercase_ ) == 0:
return None
else:
return token_list
def A_ ( self : int , lowercase_ : Optional[List[int]] ):
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
snake_case_ ,snake_case_ = self.add(lowercase_ )
# the entire list of constraints are fulfilled
if self.completed:
break
def A_ ( self : Dict , lowercase_ : int ):
if not isinstance(lowercase_ , lowercase_ ):
raise ValueError(F"`token_id` should be an `int`, but is `{token_id}`." )
snake_case_ ,snake_case_ = False, False
if self.completed:
snake_case_ = True
snake_case_ = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
snake_case_ ,snake_case_ ,snake_case_ = self.inprogress_constraint.update(lowercase_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=lowercase_ ) )
snake_case_ = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
snake_case_ = None
if len(self.pending_constraints ) == 0:
# we're done!
snake_case_ = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(lowercase_ ):
snake_case_ ,snake_case_ ,snake_case_ = pending_constraint.update(lowercase_ )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(lowercase_ )
snake_case_ = None
if not complete and stepped:
snake_case_ = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
snake_case_ = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
snake_case_ = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def A_ ( self : Union[str, Any] , lowercase_ : str=True ):
snake_case_ = ConstraintListState(self.constraints ) # we actually never though self.constraints objects
# throughout this process. So it's at initialization state.
if stateful:
snake_case_ = [
constraint.copy(stateful=lowercase_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
snake_case_ = self.inprogress_constraint.copy(stateful=lowercase_ )
snake_case_ = [constraint.copy() for constraint in self.pending_constraints]
return new_state
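# Sketch of how these constraint classes are driven by constrained beam search
# in transformers (illustrative; the names below follow the upstream library,
# not the obfuscated ones above, and "the answer" is a made-up target phrase):
#
#   from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint
#
#   tokenizer = AutoTokenizer.from_pretrained("t5-small")
#   model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#   constraint = PhrasalConstraint(tokenizer("the answer", add_special_tokens=False).input_ids)
#   out = model.generate(
#       **tokenizer("summarize: ...", return_tensors="pt"),
#       constraints=[constraint],
#       num_beams=4,
#   )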
| 72
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
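# Illustrative round trip (assumes a local SentencePiece model file named
# "spiece.model"; the path is a placeholder, not part of the original module):
#
#   tokenizer = BertGenerationTokenizer("spiece.model")
#   ids = tokenizer.encode("Hello world")
#   text = tokenizer.decode(ids)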
| 72
| 1
|
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """
    Transform a snake_case string to camelCase (or PascalCase if indicated).
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
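# Example outputs (illustrative):
#
#   >>> snake_to_camel_case("some_random_string")
#   'someRandomString'
#   >>> snake_to_camel_case("some_random_string", use_pascal=True)
#   'SomeRandomString'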
| 306
|
import random


def rabin_miller(num: int) -> bool:
    """Probabilistic Rabin-Miller primality test."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Quick check against small primes before falling back to Rabin-Miller."""
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
        157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233,
        239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
        331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419,
        421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
        509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
        613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
        709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811,
        821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
        919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Generate a random prime with roughly `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 306
| 1
|
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class UpperCamelCase__ ( lowercase_, lowercase_, lowercase_, unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = StableUnCLIPPipeline
_SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS
_SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_SCREAMING_SNAKE_CASE = False
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
lowerCAmelCase_ : Tuple = 3_2
lowerCAmelCase_ : List[Any] = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowerCAmelCase_ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
lowerCAmelCase_ : Optional[int] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=SCREAMING_SNAKE_CASE_ , projection_dim=SCREAMING_SNAKE_CASE_ , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
lowerCAmelCase_ : Any = PriorTransformer(
num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=SCREAMING_SNAKE_CASE_ , num_layers=1 , )
torch.manual_seed(0 )
lowerCAmelCase_ : int = DDPMScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1_0_0_0 , clip_sample=SCREAMING_SNAKE_CASE_ , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )
# regular denoising components
torch.manual_seed(0 )
lowerCAmelCase_ : Any = StableUnCLIPImageNormalizer(embedding_dim=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
lowerCAmelCase_ : Dict = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=SCREAMING_SNAKE_CASE_ , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
lowerCAmelCase_ : Dict = UNetaDConditionModel(
sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=SCREAMING_SNAKE_CASE_ , layers_per_block=1 , upcast_attention=SCREAMING_SNAKE_CASE_ , use_linear_projection=SCREAMING_SNAKE_CASE_ , )
torch.manual_seed(0 )
lowerCAmelCase_ : str = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='v_prediction' , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , steps_offset=1 , )
torch.manual_seed(0 )
lowerCAmelCase_ : str = AutoencoderKL()
lowerCAmelCase_ : Union[str, Any] = {
# prior components
'prior_tokenizer': prior_tokenizer,
'prior_text_encoder': prior_text_encoder,
'prior': prior,
'prior_scheduler': prior_scheduler,
# image noising components
'image_normalizer': image_normalizer,
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder,
'unet': unet,
'scheduler': scheduler,
'vae': vae,
}
return components
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any]=0 ):
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
lowerCAmelCase_ : Optional[int] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase_ : str = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'prior_num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowerCAmelCase_ : List[Any] = torch_device == 'cpu'
self._test_attention_slicing_forward_pass(test_max_difference=SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowerCAmelCase_ : Union[str, Any] = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=SCREAMING_SNAKE_CASE_ )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowerCAmelCase_ : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
lowerCAmelCase_ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCAmelCase_ : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase_ : Optional[Any] = pipe('anime turle' , generator=SCREAMING_SNAKE_CASE_ , output_type='np' )
lowerCAmelCase_ : Any = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : int ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase_ : List[str] = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
lowerCAmelCase_ : List[str] = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCAmelCase_ : str = pipe(
'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase_ : Optional[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 1_0**9
| 366
|
"""simple docstring"""
import re
def UpperCamelCase_ ( lowerCAmelCase__ : str ) -> bool:
"""simple docstring"""
lowerCAmelCase_ : str = re.compile(
R'^(?:0|94|\+94|0{2}94)' R'7(0|1|2|4|5|6|7|8)' R'(-| |)' R'\d{7}$' )
return bool(re.search(lowerCAmelCase__ , lowerCAmelCase__ ) )
if __name__ == "__main__":
lowercase__ : Optional[int] = """0094702343221"""
print(is_sri_lankan_phone_number(phone))
| 289
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = """openai-gpt"""
__UpperCamelCase = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self :Optional[int] , lowercase_ :Optional[int]=4_04_78 , lowercase_ :List[Any]=5_12 , lowercase_ :List[str]=7_68 , lowercase_ :int=12 , lowercase_ :Dict=12 , lowercase_ :Union[str, Any]="gelu" , lowercase_ :Union[str, Any]=0.1 , lowercase_ :str=0.1 , lowercase_ :List[str]=0.1 , lowercase_ :Optional[Any]=1E-5 , lowercase_ :Optional[int]=0.02 , lowercase_ :Optional[Any]="cls_index" , lowercase_ :List[str]=True , lowercase_ :List[str]=None , lowercase_ :str=True , lowercase_ :int=0.1 , **lowercase_ :List[Any] , ) -> str:
UpperCAmelCase = vocab_size
UpperCAmelCase = n_positions
UpperCAmelCase = n_embd
UpperCAmelCase = n_layer
UpperCAmelCase = n_head
UpperCAmelCase = afn
UpperCAmelCase = resid_pdrop
UpperCAmelCase = embd_pdrop
UpperCAmelCase = attn_pdrop
UpperCAmelCase = layer_norm_epsilon
UpperCAmelCase = initializer_range
UpperCAmelCase = summary_type
UpperCAmelCase = summary_use_proj
UpperCAmelCase = summary_activation
UpperCAmelCase = summary_first_dropout
UpperCAmelCase = summary_proj_to_labels
super().__init__(**lowercase_ )
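# Constructing the config is straightforward (illustrative usage):
#
#   config = OpenAIGPTConfig()           # defaults matching the original openai-gpt
#   small = OpenAIGPTConfig(n_layer=6)   # any field can be overridden via keyword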
| 78
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 272
| 0
|
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
_snake_case = 'docs/source/en/_toctree.yml'
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
_a : Optional[int] = defaultdict(UpperCamelCase__ )
_a : Optional[Any] = []
_a : str = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(UpperCamelCase__ )
_a : List[Any] = new_doc_list
_a : Dict = [key for key, value in counts.items() if value > 1]
_a : Any = []
for duplicate_key in duplicates:
_a : Optional[int] = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(UpperCamelCase__ ) > 1:
raise ValueError(
F"""{duplicate_key} is present several times in the documentation table of content at """
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
_a : Optional[Any] = sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(UpperCamelCase__ ) > 1:
raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(UpperCamelCase__ )
# Sort
return overview_doc
def lowerCAmelCase__ ( UpperCamelCase__=False ):
'''simple docstring'''
with open(UpperCamelCase__ , encoding="""utf-8""" ) as f:
_a : Dict = yaml.safe_load(f.read() )
# Get to the API doc
_a : Optional[Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_a : Optional[Any] = content[api_idx]["""sections"""]
# Then to the model doc
_a : str = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
_a : List[Any] = api_doc[scheduler_idx]["""sections"""]
_a : List[str] = clean_doc_toc(UpperCamelCase__ )
_a : Optional[int] = False
if new_scheduler_doc != scheduler_doc:
_a : List[str] = True
if overwrite:
_a : Tuple = new_scheduler_doc
if diff:
if overwrite:
_a : Optional[int] = api_doc
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(UpperCamelCase__ , allow_unicode=UpperCamelCase__ ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def lowerCAmelCase__ ( UpperCamelCase__=False ):
'''simple docstring'''
with open(UpperCamelCase__ , encoding="""utf-8""" ) as f:
_a : Optional[int] = yaml.safe_load(f.read() )
# Get to the API doc
_a : Union[str, Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_a : Any = content[api_idx]["""sections"""]
# Then to the model doc
_a : Tuple = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
_a : Optional[int] = False
_a : Dict = api_doc[pipeline_idx]["""sections"""]
_a : List[str] = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
_a : Any = pipeline_doc["""section"""]
_a : Tuple = clean_doc_toc(UpperCamelCase__ )
if overwrite:
_a : Optional[int] = new_sub_pipeline_doc
new_pipeline_docs.append(UpperCamelCase__ )
# sort overall pipeline doc
_a : Optional[Any] = clean_doc_toc(UpperCamelCase__ )
if new_pipeline_docs != pipeline_docs:
_a : Optional[int] = True
if overwrite:
_a : Optional[Any] = new_pipeline_docs
if diff:
if overwrite:
_a : Any = api_doc
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(UpperCamelCase__ , allow_unicode=UpperCamelCase__ ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_snake_case = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
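# Illustrative TOC shape handled above (added commentary; paths are made up):
#
#   - local: api/schedulers/overview
#     title: Overview
#   - local: api/schedulers/ddim
#     title: DDIM
#
# `clean_doc_toc` keeps the single "Overview" entry first and sorts the rest by title.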
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class UpperCamelCase ( snake_case_ ):
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : str ) -> int:
_a : str = parent
_a : Union[str, Any] = config_class
_a : List[Any] = has_text_modality
_a : List[Any] = kwargs
_a : List[Any] = common_properties
def _lowercase ( self : int ) -> Tuple:
_a : List[str] = self.config_class(**self.inputs_dict )
_a : Dict = (
["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["""vocab_size"""] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) , msg=f"""`{prop}` does not exist""" )
# Test that config has the common properties as setter
for idx, name in enumerate(UpperCAmelCase__ ):
try:
setattr(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
self.parent.assertEqual(
getattr(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ , msg=f"""`{name} value {idx} expected, but was {getattr(UpperCAmelCase__ , UpperCAmelCase__ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(UpperCAmelCase__ ):
try:
_a : Optional[int] = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ , msg=f"""`{name} value {idx} expected, but was {getattr(UpperCAmelCase__ , UpperCAmelCase__ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
_a : Optional[Any] = self.config_class(**self.inputs_dict )
_a : List[str] = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , UpperCAmelCase__ )
def _lowercase ( self : int ) -> List[str]:
_a : Optional[Any] = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Tuple = os.path.join(UpperCAmelCase__ , """config.json""" )
config_first.to_json_file(UpperCAmelCase__ )
_a : List[str] = self.config_class.from_json_file(UpperCAmelCase__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _lowercase ( self : Union[str, Any] ) -> Dict:
_a : Dict = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(UpperCAmelCase__ )
_a : Dict = self.config_class.from_pretrained(UpperCAmelCase__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _lowercase ( self : Dict ) -> Tuple:
_a : List[Any] = self.config_class(**self.inputs_dict )
_a : Any = """test"""
with tempfile.TemporaryDirectory() as tmpdirname:
_a : List[Any] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
config_first.save_pretrained(UpperCAmelCase__ )
_a : List[Any] = self.config_class.from_pretrained(UpperCAmelCase__ , subfolder=UpperCAmelCase__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _lowercase ( self : List[str] ) -> Union[str, Any]:
_a : Tuple = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
_a : Union[str, Any] = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def _lowercase ( self : Tuple ) -> List[str]:
if self.config_class.is_composition:
return
_a : str = self.config_class()
self.parent.assertIsNotNone(UpperCAmelCase__ )
def _lowercase ( self : List[Any] ) -> Optional[Any]:
_a : Dict = copy.deepcopy(UpperCAmelCase__ )
_a : Any = self.config_class(**UpperCAmelCase__ )
_a : str = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) )
elif getattr(UpperCAmelCase__ , UpperCAmelCase__ ) != value:
wrong_values.append((key, getattr(UpperCAmelCase__ , UpperCAmelCase__ ), value) )
if len(UpperCAmelCase__ ) > 0:
_a : List[Any] = """\n""".join([f"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
raise ValueError(f"""The following keys were not properly set in the config:\n{errors}""" )
def _lowercase ( self : int ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
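# Minimal usage sketch (added commentary; BertConfig is just an illustrative choice):
#
#   class BertConfigTest(unittest.TestCase):
#       def setUp(self):
#           self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#
#       def test_config(self):
#           self.config_tester.run_common_tests()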
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    """
    Return True if `string` can be segmented into a sequence of the given `words`.

    >>> word_break("applepenapple", ["apple", "pen"])
    True
    >>> word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])
    False
    >>> word_break("cars", ["car", "ca", "rs"])
    True
    """
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be a non-empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        # Walk the trie from `index`; recurse whenever a complete word ends here.
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
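# Complexity note (added commentary): thanks to the functools.cache memoisation each
# start index is solved at most once and every trie walk is bounded by the remaining
# string, so the check runs in roughly O(n^2) time for a string of length n, on top of
# the O(total characters in `words`) trie construction.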
if __name__ == "__main__":
import doctest
doctest.testmod()
def get_highest_set_bit_position(number: int) -> int:
    """
    Return the 1-indexed position of the highest set bit of `number`
    (equivalently, its bit length); 0 is returned for an input of 0.

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(1)
    1
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a non-negative integer")

    position = 0
    while number:
        position += 1
        number >>= 1
    return position
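# Equivalent built-in (added note): for non-negative integers this matches
# int.bit_length(), e.g. (25).bit_length() == 5 == get_highest_set_bit_position(25).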
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
def A_ ( _lowerCAmelCase : Any ):
"""simple docstring"""
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(_lowerCAmelCase, _lowerCAmelCase ):
raise TypeError('''Input value must be a \'int\' type''' )
return bin(_lowerCAmelCase ).count('''1''' )
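# Added note: Python 3.10+ also exposes int.bit_count(), so (25).bit_count() == 3
# matches binary_count_setbits(25) for non-negative inputs.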
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
_a = name
_a = val
def __str__( self ) -> List[Any]:
return F'{self.__class__.__name__}({self.name}, {self.val})'
def __lt__( self , __UpperCAmelCase ) -> Optional[Any]:
return self.val < other.val
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase ) -> Tuple:
_a = {}
_a = {}
_a = self.build_heap(__UpperCAmelCase )
def __getitem__( self , __UpperCAmelCase ) -> Optional[Any]:
return self.get_value(__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return (idx - 1) // 2
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return idx * 2 + 1
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
return idx * 2 + 2
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Tuple:
return self.heap_dict[key]
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
_a = len(__UpperCAmelCase ) - 1
_a = self.get_parent_idx(__UpperCAmelCase )
for idx, i in enumerate(__UpperCAmelCase ):
_a = idx
_a = i.val
for i in range(__UpperCAmelCase , -1 , -1 ):
self.sift_down(__UpperCAmelCase , __UpperCAmelCase )
return array
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
while True:
_a = self.get_left_child_idx(__UpperCAmelCase ) # noqa: E741
_a = self.get_right_child_idx(__UpperCAmelCase )
_a = idx
if l < len(__UpperCAmelCase ) and array[l] < array[idx]:
_a = l
if r < len(__UpperCAmelCase ) and array[r] < array[smallest]:
_a = r
if smallest != idx:
_a , _a = array[smallest], array[idx]
(
(
_a
) , (
_a
) ,
) = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
_a = smallest
else:
break
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
_a = self.get_parent_idx(__UpperCAmelCase )
while p >= 0 and self.heap[p] > self.heap[idx]:
_a , _a = self.heap[idx], self.heap[p]
_a , _a = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
_a = p
_a = self.get_parent_idx(__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> int:
return self.heap[0]
def _UpperCAmelCase ( self ) -> Any:
_a , _a = self.heap[-1], self.heap[0]
_a , _a = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
_a = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict:
self.heap.append(__UpperCAmelCase )
_a = len(self.heap ) - 1
_a = node.val
self.sift_up(len(self.heap ) - 1 )
def _UpperCAmelCase ( self ) -> str:
return len(self.heap ) == 0
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
_a = new_value
_a = new_value
self.sift_up(self.idx_of_element[node] )
__snake_case = Node('''R''', -1)
__snake_case = Node('''B''', 6)
__snake_case = Node('''A''', 3)
__snake_case = Node('''X''', 1)
__snake_case = Node('''E''', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__snake_case = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
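# Added note on the MinHeap above: `idx_of_element` maps a node to its position in the
# heap list, giving O(1) lookups that make `decrease_key` O(log n) instead of O(n);
# `build_heap` sifts down from the last parent, which is the classic O(n) heapify.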
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowerCAmelCase__ = 250004
lowerCAmelCase__ = 250020
@require_sentencepiece
@require_tokenizers
class __snake_case ( _lowercase , unittest.TestCase):
snake_case__ : List[str] = MBartaaTokenizer
snake_case__ : Tuple = MBartaaTokenizerFast
snake_case__ : Any = True
snake_case__ : Optional[Any] = True
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase : str = MBartaaTokenizer(__lowerCAmelCase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=__lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = '''<s>'''
_lowerCamelCase : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__lowerCAmelCase ) , 1_0_5_4 )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_5_4 )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : str = MBartaaTokenizer(__lowerCAmelCase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__lowerCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_lowerCamelCase : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowerCAmelCase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
_lowerCamelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
_lowerCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Any = {'''input_ids''': [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = _lowerCamelCase  # rebind the literal above to a readable name

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/mbart-large-50",
            revision="d3913889c59cd5c9e456b269c376325eabad57e2",
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)

    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
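# Added note: MBart-50 prepends the source language code to the input ids (for example
# en_XX -> id 250004) and terminates sequences with </s> (id 2); the prefix_tokens and
# suffix_tokens assertions above encode exactly that convention.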
"""simple docstring"""
import math
def snake_case_ ( A_ : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5, int(math.sqrt(A_ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def snake_case_ ( A_ : float = 0.1 ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = 3
_lowerCamelCase : List[str] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1 ):
primes += is_prime(A_ )
j += 2
return j
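# Worked example of the loop above (added commentary, checked by hand): for the 3x3
# spiral the diagonals hold 1, 3, 5, 7, 9, i.e. 3 primes out of 5 = 0.6. The next
# layer's corners are range(13, 25, 4) = 13, 17, 21 (25 = 5**2 is skipped, since an odd
# square is never prime), adding 2 primes: 5 of 9, about 0.556, and so on until the
# running ratio drops below `ratio`.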
if __name__ == "__main__":
import doctest
doctest.testmod()
"""Tree sort: build a binary search tree from the input, then read it back in order."""


class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal, appending values to `res`
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
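# Added note: average-case cost is O(n log n), but inserting already-sorted input
# degenerates the unbalanced BST into a linked list and the sort into O(n^2);
# a self-balancing tree (AVL or red-black) would avoid that worst case.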
"""Convert a raw RWKV checkpoint (BlinkDL format) to the Hugging Face Transformers format."""
import argparse
import gc
import json
import os
import re

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint


NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}


def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
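# Example of the renaming performed above (added commentary):
#   "emb.weight"              -> "rwkv.embeddings.weight"
#   "blocks.3.att.time_mix_k" -> "rwkv.blocks.3.attention.time_mix_key"
#   "head.weight"             -> "head.weight"  (the LM head keeps its name, un-prefixed)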
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
"""Check that code commented as copied from another diffusers object stays in sync with its source."""
import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    # A block continues while lines keep the indent, are empty, or merely close a signature.
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` inside the diffusers repo."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """
    Applies the black part of our `make style` command to `code`.
    """
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrites the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)

    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ):
"""simple docstring"""
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
_UpperCAmelCase = TapasConfig.from_json_file(lowercase )
# set absolute/relative position embeddings parameter
_UpperCAmelCase = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
_UpperCAmelCase = TapasForQuestionAnswering(config=lowercase )
elif task == "WTQ":
# run_task_main.py hparams
_UpperCAmelCase = 4
_UpperCAmelCase = True
# hparam_utils.py hparams
_UpperCAmelCase = 0.66_46_94
_UpperCAmelCase = 0.20_79_51
_UpperCAmelCase = 0.12_11_94
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = 0.0_35_25_13
_UpperCAmelCase = TapasForQuestionAnswering(config=lowercase )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
_UpperCAmelCase = 4
_UpperCAmelCase = False
# hparam_utils.py hparams
_UpperCAmelCase = 36.45_19
_UpperCAmelCase = 0.90_34_21
_UpperCAmelCase = 2_22.0_88
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = 0.76_31_41
_UpperCAmelCase = TapasForQuestionAnswering(config=lowercase )
elif task == "TABFACT":
_UpperCAmelCase = TapasForSequenceClassification(config=lowercase )
elif task == "MLM":
_UpperCAmelCase = TapasForMaskedLM(config=lowercase )
elif task == "INTERMEDIATE_PRETRAINING":
_UpperCAmelCase = TapasModel(config=lowercase )
else:
raise ValueError(f'''Task {task} not supported.''' )
print(f'''Building PyTorch model from configuration: {config}''' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowercase ,lowercase ,lowercase )
# Save pytorch-model (weights and configuration)
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowercase )
# Save tokenizer files
print(f'''Save tokenizer files to {pytorch_dump_path}''' )
_UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" ,model_max_length=5_12 )
tokenizer.save_pretrained(lowercase )
print("""Used relative position embeddings:""" ,model.config.reset_position_index_per_cell )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
"""FocalNet model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
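# Minimal usage sketch (added commentary): instantiating the default config and
# reading the backbone stage names aligned by the helper above.
#
#   config = FocalNetConfig(out_features=["stage1", "stage2"])
#   print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']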
"""Project Euler problem 100 (arranged probability): find the number of blue discs in
the first arrangement with more than `min_total` discs where the chance of drawing two
blue discs is exactly 1/2."""


def solution(min_total: int = 10**12) -> int:
    """
    >>> solution(10)
    15
    >>> solution(100)
    85
    """
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2
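# Derivation sketch (added commentary): requiring b(b-1) / (n(n-1)) = 1/2 for b blue
# discs out of n total leads to the Pell-like equation x^2 - 2*y^2 = -1 with x = 2n - 1
# and y = 2b - 1; successive solutions follow the sqrt(2) convergent recurrence used
# above, so each loop iteration jumps straight to the next valid (n, b) pair.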
if __name__ == "__main__":
print(f'''{solution() = }''')
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def a__ ( ):
'''simple docstring'''
print("Making key files..." )
make_key_files("rsa" , 1_0_2_4 )
print("Key files generation successful." )
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
print("Generating prime p..." )
lowerCAmelCase : int = rabinMiller.generate_large_prime(SCREAMING_SNAKE_CASE )
print("Generating prime q..." )
lowerCAmelCase : Tuple = rabinMiller.generate_large_prime(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = p * q
print("Generating e that is relatively prime to (p - 1) * (q - 1)..." )
while True:
lowerCAmelCase : Dict = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(SCREAMING_SNAKE_CASE , (p - 1) * (q - 1) ) == 1:
break
print("Calculating d that is mod inverse of e..." )
lowerCAmelCase : Tuple = cryptoMath.find_mod_inverse(SCREAMING_SNAKE_CASE , (p - 1) * (q - 1) )
lowerCAmelCase : str = (n, e)
lowerCAmelCase : int = (n, d)
return (public_key, private_key)
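# Math note (added commentary): this is textbook RSA. n = p*q, e is chosen coprime to
# phi(n) = (p - 1)*(q - 1), and d is the modular inverse of e mod phi(n), so that
# pow(pow(m, e, n), d, n) == m for any message m < n. Real deployments add padding
# such as OAEP and rely on a vetted library; this file is an educational sketch.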
def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")
if __name__ == "__main__":
main()
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowercase__ : Optional[Any] = logging.getLogger()
def a__ ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_UpperCamelCase = parser.parse_args()
return args.f
def a__ ( lowercase : Dict ) -> int:
"""simple docstring"""
_UpperCamelCase = {}
_UpperCamelCase = os.path.join(lowercase, '''all_results.json''' )
if os.path.exists(lowercase ):
with open(lowercase, '''r''' ) as f:
_UpperCamelCase = json.load(lowercase )
else:
raise ValueError(F"""can't find {path}""" )
return results
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
lowercase__ : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
@classmethod
def snake_case__ ( cls : Optional[int] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
_UpperCamelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case__ ( cls : Tuple ) -> int:
'''simple docstring'''
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Any ) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''glue_no_trainer''' ) ) )
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
            """.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
            """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
            """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
            """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
            """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
            """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
            """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
            """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
| 324
| 0
|
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs,
            d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head,
            d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id, pad_token_id=self.vocab_size - 1, init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)
    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)
    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        )
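
    # A minimal sketch of the recurrence exercised above (illustrative, assuming
    # a built `model` and two consecutive segments of token ids): the memories
    # returned by one forward pass are fed back so the next segment can attend
    # to the cached hidden states of the previous one.
    #
    #     _, mems = model(first_segment_ids).to_tuple()
    #     hidden, mems = model({"input_ids": next_segment_ids, "mems": mems}).to_tuple()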
    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        )
    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False
    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TransfoXL is not yet XLA compliant, so this common test is a no-op here.
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
a__ : Union[str, Any] = tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
a__ : Optional[int] = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
a__ : Any = model.generate(__lowercase , max_length=2_0_0 , do_sample=__lowercase )
self.assertListEqual(output_ids[0].numpy().tolist() , __lowercase )
| 266
|
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 byte values to printable unicode strings, so that
    whitespace/control bytes get stable visible representations the BPE can work with.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
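

# For intuition: printable bytes map to themselves, while bytes the BPE would
# mishandle are shifted into unused code points, e.g. the space byte 0x20 comes
# back as "Ġ" (chr(256 + 32)), which is why GPT-2 style vocabularies are full
# of "Ġ"-prefixed tokens.
#
#     assert bytes_to_unicode()[ord("A")] == "A"
#     assert bytes_to_unicode()[ord(" ")] == "Ġ"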
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where the word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
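

# Example: get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}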
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (i.e. earliest-learned) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
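
    # Rough walk-through with hypothetical merges [("l", "l"), ("e", "ll")]:
    # "hello" starts as ("h", "e", "l", "l", "o"), becomes ("h", "e", "ll", "o"),
    # then ("h", "ell", "o"), and stops once no remaining adjacent pair appears
    # in self.bpe_ranks; the result is cached per input token.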
    def _tokenize(self, text):
        """Tokenize a string by splitting on the regex, byte-encoding each piece, then applying BPE."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """A Blenderbot sequence has the format ``X </s>``: no BOS, just a trailing EOS."""
        return token_ids_0 + [self.eos_token_id]
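
    # For instance (illustrative ids): if "Hello" tokenizes to [123, 456] and
    # eos_token_id is 2, build_inputs_with_special_tokens([123, 456]) returns
    # [123, 456, 2]; unlike RoBERTa, no <s> token is prepended.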
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 266
| 1
|
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level() -> int:
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that level.
    Otherwise fall back to ``_default_log_level``.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current level for the library's root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the level for the library's root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set the verbosity to the ``INFO`` level."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to the ``WARNING`` level."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to the ``DEBUG`` level."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to the ``ERROR`` level."""
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any requested attribute."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
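

# A minimal usage sketch (assuming this module is importable as
# ``datasets.utils.logging``):
#
#     from datasets.utils import logging as ds_logging
#
#     ds_logging.set_verbosity_info()    # the root "datasets" logger now emits INFO
#     ds_logging.disable_progress_bar()  # tqdm(...) calls return no-op EmptyTqdm objects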
| 102
|
"""simple docstring"""
import re
def indian_phone_validator(phone: str) -> bool:
    """Return True only if the whole string is a plausible Indian phone number."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
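

# A few illustrative cases, derived from the pattern above rather than any
# official numbering spec:
#
#     indian_phone_validator("+918827897895")  # True: +91 prefix plus ten digits
#     indian_phone_validator("9876543210")     # True: bare ten digits starting with 9
#     indian_phone_validator("+91123456789")   # False: subscriber part must start with 7, 8 or 9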
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895'''))
| 153
| 0
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(R"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
| 12
|
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
    if args.half:
        pipe.to(torch_dtype=torch.float16)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
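
# A typical invocation (paths are illustrative; the flags are the ones defined above):
#
#     python convert_original_stable_diffusion_to_diffusers.py \
#         --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#         --original_config_file ./v1-inference.yaml \
#         --scheduler_type ddim \
#         --dump_path ./stable-diffusion-v1-5-diffusers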
| 12
| 1
|
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
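
# Example run (the script name is illustrative; defaults come from the argparse setup above):
#
#     python token_counts.py \
#         --data_file data/dump.bert-base-uncased.pickle \
#         --token_counts_dump data/token_counts.bert-base-uncased.pickle \
#         --vocab_size 30522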
| 39
|
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size,
            num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels

    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
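
    # jax.disable_jit() re-runs the same traced function eagerly, so the shapes
    # of the compiled and uncompiled outputs can be compared directly; stricter
    # checks could also compare the values with np.allclose.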
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
| 142
| 0
|
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the turn around time of each process under Highest Response Ratio Next scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Find the first unfinished process and fast-forward the clock if
        # nothing has arrived yet.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time
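

# The selection rule above is the textbook HRRN formula: for every waiting
# process, response ratio = (waiting time + burst time) / burst time. For
# example, a job that has waited 6 time units and needs 3 more scores
# (6 + 3) / 3 = 3.0, beating a fresh arrival whose ratio is 1.0.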
def calculate_waiting_time(process_name: list, turn_around_time: list, burst_time: list, no_of_process: int) -> list:
    """Turn around time minus burst time gives how long each process sat waiting."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
__A = 5
__A = ['''A''', '''B''', '''C''', '''D''', '''E''']
__A = [1, 2, 3, 4, 5]
__A = [1, 2, 3, 4, 5]
__A = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
__A = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
f'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
f'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(f'''average waiting time : {mean(waiting_time):.5f}''')
print(f'''average turn around time : {mean(turn_around_time):.5f}''')
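
# Worked trace of the demo above (hand-checked against the two functions): with
# arrival times [1, 2, 3, 4, 5] and burst times [1, 2, 3, 4, 5], HRRN runs A at
# t=1 and B at t=2, then at t=4 picks C over D because C's response ratio
# (3 + 1) / 3 beats D's (4 + 0) / 4. The resulting turn around times are
# [1, 2, 4, 7, 11] and the waiting times [0, 0, 1, 3, 6], so the demo prints an
# average waiting time of 2.00000 and an average turn around time of 5.00000.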
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(self, parent, batch_size=13, image_size=[30, 30], patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, n_targets=8, num_detection_tokens=10):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    # special case for object detection head model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
import logging

from transformers import PretrainedConfig


logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    """Class to store the configuration of the BertAbs model."""

    model_type = "bertabs"

    def __init__(self, vocab_size=30522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
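
# Usage note: BertAbsConfig() reproduces the defaults of the fine-tuned
# CNN/DailyMail checkpoint referenced above, i.e. a 6-layer encoder with hidden
# size 512 and a 6-layer decoder with hidden size 768:
#
#     config = BertAbsConfig()
#     assert config.enc_hidden_size == 512 and config.dec_hidden_size == 768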
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Translate a plain-text message into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Translate a Morse-code message back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
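
# Round-trip example: encrypt("SOS") returns "... --- ..." and
# decrypt("... --- ...") returns "SOS".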
"""simple docstring"""
from math import isclose, sqrt
def snake_case (A_ :float , A_ :float , A_ :float ):
'''simple docstring'''
a : Dict = point_y / 4 / point_x
a : Optional[Any] = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
a : Dict = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
a : List[Any] = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
a : Optional[Any] = outgoing_gradient**2 + 4
a : List[Any] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
a : List[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 1_0_0
a : Tuple = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
a : Union[str, Any] = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
a : List[Any] = x_minus if isclose(A_ , A_ ) else x_plus
a : Optional[Any] = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def snake_case (A_ :float = 1.4 , A_ :float = -9.6 ):
'''simple docstring'''
a : int = 0
a : float = first_x_coord
a : float = first_y_coord
a : float = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
a : Union[str, Any] = next_point(A_ , A_ , A_ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
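
# For the problem's default starting beam, the loop above terminates after 354
# reflections, the published answer to Project Euler 144.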
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_UpperCamelCase : Dict = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class snake_case ( UpperCAmelCase ):
__magic_name__ = field(default=UpperCAmelCase , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
__magic_name__ = field(
default=UpperCAmelCase , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
__magic_name__ = field(
default=UpperCAmelCase , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
__magic_name__ = field(
default=UpperCAmelCase , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
__magic_name__ = field(
default=UpperCAmelCase , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
a : List[Any] = super().to_dict()
for k, v in d.items():
if isinstance(A , A ):
a : str = v.to_dict()
return d
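
# Minimal usage sketch (the output directory name is illustrative):
#
#     args = Seq2SeqTrainingArguments(
#         output_dir="out", predict_with_generate=True, generation_num_beams=4
#     )
#
# The trainer then reads `args.generation_num_beams` and
# `args.generation_max_length` on each evaluation loop.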
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_same_dtype(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
"""simple docstring"""
from __future__ import annotations
class snake_case :
'''simple docstring'''
def __init__( self : int, _lowerCamelCase : List[Any]=None ):
'''simple docstring'''
__A = data
__A = None
def __repr__( self : Union[str, Any] ):
'''simple docstring'''
__A = []
__A = self
while temp:
string_rep.append(f'{temp.data}' )
__A = temp.next
return "->".join(_lowerCamelCase )
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
if not elements_list:
raise Exception('''The Elements List is empty''' )
__A = __A = Node(elements_list[0] )
for i in range(1 , len(__UpperCamelCase ) ):
__A = Node(elements_list[i] )
__A = current.next
return head
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
if head_node is not None and isinstance(__UpperCamelCase , __UpperCamelCase ):
print_reverse(head_node.next )
print(head_node.data )
def lowerCAmelCase ( ):
"""simple docstring"""
from doctest import testmod
testmod()
__A = make_linked_list([1_4, 5_2, 1_4, 1_2, 4_3] )
print('''Linked List:''' )
print(__UpperCamelCase )
print('''Elements in Reverse:''' )
print_reverse(__UpperCamelCase )
if __name__ == "__main__":
main()
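
# Example: make_linked_list([1, 2, 3]) returns a head node whose repr() is
# "1->2->3", and print_reverse(head) prints 3, 2 and 1 on separate lines.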
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem by backtracking."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the knight tour problem for a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
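
# Usage example: open_knight_tour(5) returns a 5x5 board whose entries 1..25
# trace a valid open knight's tour, while open_knight_tour(4) raises ValueError
# because no knight's tour exists on a 4x4 board.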
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
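
# Usage sketch: this builder is what backs `datasets.load_dataset("json", ...)`.
# For a hypothetical JSON-lines file with one object per line:
#
#     import datasets
#     ds = datasets.load_dataset("json", data_files="data.jsonl", split="train")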
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center
    )

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ChineseCLIPProcessor(ProcessorMixin):
    r"""
    Constructs a Chinese-CLIP processor which wraps a Chinese-CLIP image processor and a Chinese-CLIP tokenizer into
    a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
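
# Minimal usage sketch (the checkpoint name is one public Chinese-CLIP model;
# substitute your own):
#
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     batch = processor(text=["一只猫"], images=image, return_tensors="pt")
#
# The returned encoding then carries both `input_ids` and `pixel_values`.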
import warnings

from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum

from ...processing_utils import ProcessorMixin


if is_torch_available():
    import torch


class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        """Convert a tuple of (char, bpe, wp) logits into decoded strings plus per-head predictions and scores."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
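
# Decoding sketch (tensor names are illustrative): given the character, BPE and
# wordpiece logits produced by an MGP-STR model,
#
#     out = processor.batch_decode((char_logits, bpe_logits, wp_logits))
#     texts = out["generated_text"]
#
# returns, per image, the candidate string whose cumulative token confidence is
# highest across the three decoding heads.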
"""Speech processor class for Wav2Vec2."""
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility, the target-context-manager path forwards
        # everything to whichever sub-processor is currently active.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
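# Hedged usage sketch for the processor above (the checkpoint name and the 16 kHz
# mono waveform are assumptions; substitute your own):
#
#     processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#     inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#     labels = processor(text="HELLO WORLD", return_tensors="pt").input_ids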
| 4
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
_A = torch.device('''cpu''')
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
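# The tensors above hold the first five logits the original checkpoints produce on the
# COCO test image fetched by prepare_img(); convert_swiftformer_checkpoint compares the
# ported model against them with atol=1e-3 as a regression check.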
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")
    # compare outputs from both models
    expected_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits
    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], expected_logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
_A = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
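# Example invocation (hypothetical script name and checkpoint path):
#
#     python convert_swiftformer_original_to_hf.py \
#         --swiftformer_name swiftformer_xs \
#         --pytorch_dump_folder_path ./converted_outputs/ \
#         --original_ckpt ./swiftformer_xs.pth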
| 278
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
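# The module body above only registers an import table; _LazyModule swaps itself into
# sys.modules so each submodule is imported on first attribute access. Sketch of the
# resulting behaviour (assuming the package lives at transformers.models.altclip):
#
#     from transformers.models import altclip
#     config = altclip.AltCLIPConfig()  # configuration_altclip is imported only now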
| 66
|
import math

import tensorflow as tf
from packaging import version


def _gelu(x):
    """Exact Gaussian Error Linear Unit, computed via the error function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """GELU with the smoother tanh approximation."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU clipped to [-10, 10], useful for quantization-friendly models."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split the input in two along `axis` and gate one half."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
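# Usage sketch for the registry above: resolve an activation by name and apply it.
#
#     act = get_tf_activation("gelu_new")
#     y = act(tf.constant([-1.0, 0.0, 1.0]))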
| 66
| 1
|
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random element of the list as the pivot."""
    return choice(lst)


def kth_number(lst, k):
    """Return the k-th smallest element via randomized quickselect (expected linear time)."""
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
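# Usage sketch: k is 1-based, so k=3 selects the third-smallest element. The partition
# keeps only strictly smaller/larger elements, so inputs are assumed to hold distinct
# values.
#
#     kth_number([2, 1, 3, 4, 5], 3)  # -> 3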
if __name__ == "__main__":
import doctest
doctest.testmod()
| 108
|
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = """Hello world! cécé herlolip"""
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
    if classification_head:
        config.num_labels = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
    print('''Our RoBERTa config:''' , config)
    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()
# Now let's copy all the weights.
# Embeddings
A_ : str = roberta_sent_encoder.embed_tokens.weight
A_ : int = roberta_sent_encoder.embed_positions.weight
A_ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
A_ : Union[str, Any] = roberta_sent_encoder.layer_norm.weight
A_ : int = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
A_ : BertLayer = model.roberta.encoder.layer[i]
A_ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
A_ : RobertaAttention = layer.attention
A_ : Dict = roberta_layer.self_attn_layer_norm.weight
A_ : str = roberta_layer.self_attn_layer_norm.bias
# self attention
A_ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
A_ : str = roberta_layer.self_attn.q_proj.weight
A_ : List[str] = roberta_layer.self_attn.q_proj.bias
A_ : int = roberta_layer.self_attn.k_proj.weight
A_ : List[Any] = roberta_layer.self_attn.k_proj.bias
A_ : Dict = roberta_layer.self_attn.v_proj.weight
A_ : int = roberta_layer.self_attn.v_proj.bias
# self-attention output
A_ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
A_ : Any = roberta_layer.self_attn.out_proj.weight
A_ : Optional[Any] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
A_ : Any = roberta_layer.final_layer_norm.weight
A_ : int = roberta_layer.final_layer_norm.bias
# intermediate
A_ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
A_ : int = roberta_layer.fca.weight
A_ : List[str] = roberta_layer.fca.bias
# output
A_ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
A_ : Optional[int] = roberta_layer.fca.weight
A_ : List[Any] = roberta_layer.fca.bias
# end of layer
if classification_head:
A_ : str = roberta.model.classification_heads['''mnli'''].dense.weight
A_ : int = roberta.model.classification_heads['''mnli'''].dense.bias
A_ : str = roberta.model.classification_heads['''mnli'''].out_proj.weight
A_ : Dict = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
A_ : int = roberta.model.encoder.lm_head.dense.weight
A_ : List[str] = roberta.model.encoder.lm_head.dense.bias
A_ : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.weight
A_ : int = roberta.model.encoder.lm_head.layer_norm.bias
A_ : Optional[int] = roberta.model.encoder.lm_head.weight
A_ : Dict = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
A_ : torch.Tensor = roberta.encode(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
A_ : Union[str, Any] = model(SCREAMING_SNAKE_CASE )[0]
if classification_head:
A_ : str = roberta.model.classification_heads['''mnli'''](roberta.extract_features(SCREAMING_SNAKE_CASE ) )
else:
A_ : int = roberta.model(SCREAMING_SNAKE_CASE )[0]
print(our_output.shape , their_output.shape )
A_ : Any = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
A_ : Tuple = torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(SCREAMING_SNAKE_CASE ).mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
UpperCamelCase = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
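# Example invocation (hypothetical script name and paths; requires fairseq >= 1.0.0a):
#
#     python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#         --roberta_checkpoint_path ./xlmr.xl \
#         --pytorch_dump_folder_path ./xlm-roberta-xl \
#         --classification_head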
| 186
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
    # set labels
    repo_id = '''huggingface/label-files'''
    if "o365" in model_name:
        num_labels = 366
        filename = '''object365-id2label.json'''
    else:
        num_labels = 91
        filename = '''coco-detection-id2label.json'''
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='''dataset''' ) ) , '''r''' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''')
            in_proj_bias = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[:dim]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[dim : dim * 2]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[-dim:, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:hidden_size, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size:, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size:]
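# Both read_in_* helpers above unfuse a packed qkv projection: the original checkpoints
# store query/key/value as a single (3 * hidden, hidden) matrix, which is sliced into
# thirds (rows [0:h], [h:2h], [2h:3h]) to populate the separate HF q/k/v parameters.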
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''')
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''')
    else:
        raise ValueError(f'''Model name {model_name} not supported''')
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']
    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format='''coco_detection''')
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors='''pt''')
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values.to(device))
    # verify logits
    print('''Logits:''' , outputs.logits[0, :3, :3])
    print('''Boxes:''' , outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1E-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1E-4)
    print('''Everything ok!''')
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print('''Pushing model and processor to hub...''')
        model.push_to_hub(f'''jozhang97/{model_name}''')
        processor.push_to_hub(f'''jozhang97/{model_name}''')
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__a = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 235
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=False, vocab_size=19, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : str = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase_ : Optional[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = EsmConfig(
vocab_size=33 ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,is_folding_model=_SCREAMING_SNAKE_CASE ,esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} ,)
return config
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ : Optional[int] = EsmForProteinFolding(config=_SCREAMING_SNAKE_CASE ).float()
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.positions.shape ,(8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape ,(8, self.batch_size, self.seq_length, 7, 2) )
def a__ ( self ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
def a__ ( self ) -> Tuple:
self.config_tester.run_common_tests()
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
@unittest.skip('''Does not support attention outputs''' )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip
def a__ ( self ) -> Dict:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def a__ ( self ) -> Union[str, Any]:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def a__ ( self ) -> List[Any]:
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def a__ ( self ) -> Any:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def a__ ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def a__ ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def a__ ( self ) -> Dict:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def a__ ( self ) -> str:
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def a__ ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold only has one output format.''' )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def a__ ( self ) -> int:
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def a__ ( self ) -> List[Any]:
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def a__ ( self ) -> Tuple:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def a__ ( self ) -> List[str]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def a__ ( self ) -> Tuple:
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def a__ ( self ) -> Dict:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a__ ( self ) -> List[Any]:
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
"""simple docstring"""
@slow
def a__ ( self ) -> List[Any]:
        model = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''').float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)['''positions''']
        expected = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected, atol=1e-4))
| 235
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
def __UpperCAmelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, '''w''') as fp:
            fp.write('''\n'''.join(merges))
def __UpperCAmelCase ( self , _a ):
__a = '''lower newer'''
__a = '''lower newer'''
return input_text, output_text
def __UpperCAmelCase ( self ):
__a = BioGptTokenizer(self.vocab_file , self.merges_file )
__a = '''lower'''
__a = ['''low''', '''er</w>''']
__a = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
__a = tokens + ['''<unk>''']
__a = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
@slow
def __UpperCAmelCase ( self ):
__a = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
__a = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
__a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
__a = tokenizer.build_inputs_with_special_tokens(_a )
__a = tokenizer.build_inputs_with_special_tokens(_a , _a )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
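    # The toy vocabulary above exercises subword BPE end-to-end: "lower" first splits
    # into characters, then the learned merges produce ["low", "er</w>"], exactly what
    # the tokenize test asserts ("</w>" marks the end of a word in this format).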
| 45
|
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
def lowercase__ ( self : Dict ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
def lowercase__ ( self : str , **lowercase : List[str] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def lowercase__ ( self : int , **lowercase : int ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowercase )
def lowercase__ ( self : Optional[int] , lowercase : List[Any] ):
"""simple docstring"""
lowercase_ :List[str] = "lower newer"
lowercase_ :Any = "lower newer"
return input_text, output_text
def lowercase__ ( self : int ):
"""simple docstring"""
lowercase_ :List[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase_ :Dict = "lower newer"
lowercase_ :Dict = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
lowercase_ :int = tokenizer.tokenize(lowercase ) # , add_prefix_space=True)
self.assertListEqual(lowercase , lowercase )
lowercase_ :Optional[Any] = tokens + [tokenizer.unk_token]
lowercase_ :Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )
def lowercase__ ( self : Dict ):
"""simple docstring"""
lowercase_ :Dict = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=lowercase ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=lowercase ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def lowercase__ ( self : List[str] ):
"""simple docstring"""
lowercase_ :Optional[Any] = self.tokenizer_class.from_pretrained("roberta-base" )
lowercase_ :Any = tokenizer.encode("sequence builders" , add_special_tokens=lowercase )
lowercase_ :str = tokenizer.encode("multi-sequence build" , add_special_tokens=lowercase )
lowercase_ :int = tokenizer.encode(
"sequence builders" , add_special_tokens=lowercase , add_prefix_space=lowercase )
lowercase_ :Optional[int] = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=lowercase , add_prefix_space=lowercase )
lowercase_ :str = tokenizer.build_inputs_with_special_tokens(lowercase )
lowercase_ :Optional[int] = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
lowercase_ :Optional[int] = self.get_tokenizer()
lowercase_ :str = "Encode this sequence."
lowercase_ :Tuple = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
lowercase_ :List[str] = tokenizer.encode(lowercase , add_special_tokens=lowercase , add_prefix_space=lowercase )
lowercase_ :List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowercase , lowercase )
lowercase_ :List[str] = tokenizer.encode(lowercase , add_special_tokens=lowercase , add_prefix_space=lowercase )
lowercase_ :List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowercase , lowercase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
lowercase_ :List[str] = tokenizer.encode(lowercase , add_special_tokens=lowercase )
lowercase_ :Optional[int] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowercase , lowercase )
# Testing spaces after special tokens
lowercase_ :Union[str, Any] = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase )} ) # mask token has a left space
lowercase_ :Any = tokenizer.convert_tokens_to_ids(lowercase )
lowercase_ :Tuple = "Encode <mask> sequence"
lowercase_ :int = "Encode <mask>sequence"
lowercase_ :str = tokenizer.encode(lowercase )
lowercase_ :Any = encoded.index(lowercase )
lowercase_ :Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowercase , lowercase )
lowercase_ :str = tokenizer.encode(lowercase )
lowercase_ :int = encoded.index(lowercase )
lowercase_ :Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowercase , lowercase )
def lowercase__ ( self : Optional[Any] ):
"""simple docstring"""
pass
def lowercase__ ( self : Dict ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowercase_ :List[Any] = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
lowercase_ :Dict = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
lowercase_ :str = "A, <mask> AllenNLP sentence."
lowercase_ :Tuple = tokenizer_r.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase )
lowercase_ :str = tokenizer_p.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
lowercase_ :Any = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
lowercase_ :Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowercase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowercase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def lowercase__ ( self : Dict ):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowercase_ :List[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
lowercase_ :int = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowercase_ :Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , lowercase )
self.assertEqual(post_processor_state["add_prefix_space"] , lowercase )
self.assertEqual(post_processor_state["trim_offsets"] , lowercase )
def lowercase__ ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowercase_ :Tuple = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
lowercase_ :Optional[Any] = F'{text_of_1_token} {text_of_1_token}'
lowercase_ :int = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
lowercase_ :Optional[int] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase ) + 1, len(lowercase ) + 1 + len(lowercase )) , )
lowercase_ :Dict = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
lowercase_ :List[Any] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase ) + 1, len(lowercase ) + 1 + len(lowercase )) , )
lowercase_ :Any = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
lowercase_ :Optional[Any] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase ), len(lowercase ) + 1 + len(lowercase )) , )
lowercase_ :List[str] = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
lowercase_ :List[Any] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase ), len(lowercase ) + 1 + len(lowercase )) , )
lowercase_ :Dict = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowercase_ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
lowercase_ :Optional[Any] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase ) + 1, 1 + len(lowercase ) + 1 + len(lowercase )) , )
lowercase_ :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
lowercase_ :Dict = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase ), 1 + len(lowercase ) + 1 + len(lowercase )) , )
lowercase_ :Optional[int] = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
lowercase_ :Dict = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase ), 1 + len(lowercase ) + 1 + len(lowercase )) , )
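                # Concretely, for text "hello hello": with trim_offsets=True the second
                # "hello" maps to offsets (6, 11), the leading space being trimmed away,
                # while with trim_offsets=False it maps to (5, 11), keeping the space.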
from __future__ import annotations
from typing import Any
class Graph:
    """simple docstring"""

    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def test_vector() -> None:
    """
    An illustrative check (the graph and its edge weights are arbitrary example
    values):

    >>> g = Graph(4)
    >>> for u, v, w in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)):
    ...     g.add_edge(u, v, w)
    >>> g.boruvka()
    Added edge [0 - 3]
    Added weight: 5
    <BLANKLINE>
    Added edge [0 - 1]
    Added weight: 10
    <BLANKLINE>
    Added edge [2 - 3]
    Added weight: 4
    <BLANKLINE>
    The total weight of the minimal spanning tree is: 19
    """
if __name__ == "__main__":
import doctest
doctest.testmod()
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    """simple docstring"""

    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor was cast back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # Calling the pipeline on an input with no mask token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(vocab):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(unmasked_targets))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # results than the number of unique targets
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    '''simple docstring'''
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
def remove_duplicates(key: str) -> str:
    """simple docstring"""
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """simple docstring"""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """simple docstring"""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """simple docstring"""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main() -> None:
    """simple docstring"""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
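# For example, the default attention_types=[[["global", "local"], 12]] repeats the
# pattern ["global", "local"] 12 times, yielding 24 attention layers that alternate
# global and local attention, which matches the default num_layers=24.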
def custom_unfold(input, dimension, size, step):
    """simple docstring"""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """simple docstring"""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
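# Worked example: for seq_length=256 and window_size=128, the divisor candidates
# are 1..127; the divisors of 256 among them are 1, 2, 4, 8, 16, 32 and 64, so
# the helper returns a block length of 64 and 256 // 64 = 4 blocks.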
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token) -> int:
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
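    # For example, with a single language-code prefix token and a 3-token sequence,
    # get_special_tokens_mask returns [1, 0, 0, 0, 1]: the prefix token and the
    # appended eos are flagged as special while the sequence tokens are not.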
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    '''simple docstring'''
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    '''simple docstring'''
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    '''simple docstring'''
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip("""Swin does not use inputs_embeds""" )
def lowerCAmelCase_ ( self: str ) -> List[str]:
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def lowerCAmelCase_ ( self: int ) -> Optional[int]:
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def lowerCAmelCase_ ( self: List[Any] ) -> List[Any]:
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def lowerCAmelCase_ ( self: Dict ) -> List[Any]:
pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[str]:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def lowerCAmelCase_ ( self: List[str] ) -> str:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def lowerCAmelCase_ ( self: str ) -> List[Any]:
pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    '''simple docstring'''

    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2000000) -> int:
    """simple docstring"""
    return sum(takewhile(lambda x: x < n, prime_generator()))
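# Sanity check: the primes below 10 are 2, 3, 5 and 7, so solution(10) == 17.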
if __name__ == "__main__":
print(F'{solution() = }')
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 120
| 1
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
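# The DeepSpeed wrappers below are only re-exported when the optional `deepspeed`
# package is installed; everything after the `if` block is imported unconditionally.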
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 235
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 235
| 1
|
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
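# PyTorch LeViT model tests: configuration checks, shape checks for the base model and
# the classification heads, plus a slow integration test against a pretrained checkpoint.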
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # The convolutional patch embedding halves the spatial resolution four times.
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
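# Shape-check worked example with the defaults above: image_size=64, kernel_size=3,
# stride=2, padding=1 give floor((64 + 2 - 3) / 2 + 1) = 32, then 16, 8 and 4 after the
# four conv layers, and the two Subsample stages shrink the 4x4 token grid further,
# matching ceil(4 / 4) * ceil(4 / 4) = 1 token in the final stage.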
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason="Levit does not output attentions" )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(__lowercase )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    ((height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride)
                    + 1
                )
                width = floor(
                    ((width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride)
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [height * width, self.model_tester.hidden_sizes[0]],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any]=False ):
'''simple docstring'''
_A = super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class not in [*get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 350
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 174
| 0
|
"""simple docstring"""
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class AcceleratedScheduler:
    """
    Wraps a learning-rate scheduler so that it only advances when the wrapped optimizer(s)
    actually stepped, which keeps schedules on track under gradient accumulation, skipped
    mixed-precision steps and multi-process training.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer=True, split_batches=False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
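# Worked example: with 4 processes and split_batches=False, one training step consumes 4x
# the per-device batch, so step() above advances the wrapped scheduler 4 times to keep a
# schedule written in terms of single-process steps on track.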
| 286
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 286
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__snake_case =logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
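# Accepted layouts: a single frame becomes [[frame]], one video (a list of frames)
# becomes [video], and a batch of videos is returned unchanged.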
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
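# Minimal usage sketch: `VideoMAEImageProcessor()(list_of_frames, return_tensors="np")`
# yields {"pixel_values": ...} with each frame resized on its shortest edge,
# center-cropped to 224x224, rescaled and normalized.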
| 363
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Run a few sanity checks on the constraint implementation."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
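# Stepping example: for PhrasalConstraint([5, 9, 3]), update(5) reports stepped but not
# completed, update(9) advances again, and update(3) completes; any non-matching token
# resets the fulfilled index instead.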
class DisjunctiveTrie:
    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
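# Trie example: [[1, 2, 3], [1, 2, 4]] shares the prefix 1 -> 2 and branches into leaves
# 3 and 4, so next_tokens([1, 2]) returns [3, 4] and reached_leaf([1, 2, 3]) is True.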
class DisjunctiveConstraint(Constraint):
    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class ConstraintListState:
    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                #     inprogress to None. If there are no pending constraints either, then this full list of
                #     constraints is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never touch self.constraints objects
        # throughout this process, so they stay in their initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
| 55
| 0
|
def solution(n: int = 1000) -> int:
    """Count how many of the first `n` expansions of the continued fraction for sqrt(2)
    have a numerator with more digits than the denominator (Project Euler problem 57)."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
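# First expansions: 3/2, 7/5, 17/12, 41/29, ...; the 8th, 1393/985, is the first whose
# numerator has more digits than its denominator.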
if __name__ == "__main__":
print(f"""{solution() = }""")
| 52
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__lowerCamelCase : Union[str, Any] = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
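# `mockfs` is a pytest fixture (assumed to come from the test suite's conftest) that
# registers a mock fsspec filesystem under the "mock://" protocol.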
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 52
| 1
|
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __A :
def __init__( self , a__ , a__=2 , a__=8 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=16 , a__=5 , a__=2 , a__=36 , a__="gelu" , a__=0.0 , a__=0.0 , a__=512 , a__=16 , a__=2 , a__=0.0_2 , a__=3 , a__=4 , a__=None , ):
_lowerCAmelCase : List[str] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : Optional[int] = seq_length
_lowerCAmelCase : Any = is_training
_lowerCAmelCase : List[Any] = use_input_mask
_lowerCAmelCase : Union[str, Any] = use_token_type_ids
_lowerCAmelCase : Optional[int] = use_labels
_lowerCAmelCase : Dict = vocab_size
_lowerCAmelCase : Optional[int] = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Tuple = num_attention_heads
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : int = hidden_act
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : List[Any] = type_vocab_size
_lowerCAmelCase : int = type_sequence_label_size
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : str = num_labels
_lowerCAmelCase : List[Any] = num_choices
_lowerCAmelCase : Optional[int] = scope
def __A ( self ):
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Union[str, Any] = None
if self.use_input_mask:
_lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : Dict = None
if self.use_token_type_ids:
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a__ , initializer_range=self.initializer_range , )
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.get_config()
_lowerCAmelCase : str = 300
return config
def __A ( self ):
(
_lowerCAmelCase ,
_lowerCAmelCase ,
_lowerCAmelCase ,
_lowerCAmelCase ,
_lowerCAmelCase ,
_lowerCAmelCase ,
_lowerCAmelCase ,
) : int = self.prepare_config_and_inputs()
_lowerCAmelCase : Dict = True
_lowerCAmelCase : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = MraModel(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Tuple = model(a__ , attention_mask=a__ , token_type_ids=a__ )
_lowerCAmelCase : str = model(a__ , token_type_ids=a__ )
_lowerCAmelCase : Tuple = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : List[str] = MraModel(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : int = model(
a__ , attention_mask=a__ , token_type_ids=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , )
_lowerCAmelCase : Union[str, Any] = model(
a__ , attention_mask=a__ , token_type_ids=a__ , encoder_hidden_states=a__ , )
_lowerCAmelCase : List[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = MraForMaskedLM(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Tuple = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = MraForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[int] = model(
a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : Optional[int] = MraForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = self.num_labels
_lowerCAmelCase : Optional[int] = MraForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[int] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = self.num_choices
_lowerCAmelCase : Tuple = MraForMultipleChoice(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : Union[str, Any] = model(
a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self ):
_lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
(
_lowerCAmelCase ,
_lowerCAmelCase ,
_lowerCAmelCase ,
_lowerCAmelCase ,
_lowerCAmelCase ,
_lowerCAmelCase ,
_lowerCAmelCase ,
) : Union[str, Any] = config_and_inputs
_lowerCAmelCase : str = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : int = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Any = False
_UpperCamelCase : Any = False
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Optional[Any] = ()
def __A ( self ):
_lowerCAmelCase : List[str] = MraModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self , config_class=a__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase : str = type
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a__ )
def __A ( self ):
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*a__ )
def __A ( self ):
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
def __A ( self ):
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
@slow
def __A ( self ):
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : List[str] = MraModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip(reason="""MRA does not output attentions""" )
def __A ( self ):
return
@require_torch
class __A ( unittest.TestCase ):
@slow
def __A ( self ):
_lowerCAmelCase : Dict = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
_lowerCAmelCase : str = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_lowerCAmelCase : List[Any] = model(a__ )[0]
_lowerCAmelCase : Any = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , a__ )
_lowerCAmelCase : str = torch.tensor(
[[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , a__ , atol=1e-4 ) )
@slow
def __A ( self ):
_lowerCAmelCase : Any = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
_lowerCAmelCase : int = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(a__ )[0]
_lowerCAmelCase : str = 50265
_lowerCAmelCase : Optional[int] = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , a__ )
_lowerCAmelCase : int = torch.tensor(
[[[9.2_5_9_5, -3.6_0_3_8, 11.8819], [9.3_8_6_9, -3.2_6_9_3, 11.0956], [11.8524, -3.4_9_3_8, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , a__ , atol=1e-4 ) )
@slow
def __A ( self ):
_lowerCAmelCase : Dict = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
_lowerCAmelCase : str = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
_lowerCAmelCase : Any = model(a__ )[0]
_lowerCAmelCase : List[Any] = 50265
_lowerCAmelCase : Tuple = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , a__ )
_lowerCAmelCase : List[str] = torch.tensor(
[[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , a__ , atol=1e-4 ) )
| 370
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[str] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Dict = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Dict = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Dict = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[str] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
| 126
| 0
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : Union[str, Any] ) -> Any:
lowerCAmelCase_ : Optional[Any] = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
lowerCAmelCase_ : str = get_activation("""gelu""" )
self.assertTrue(torch.allclose(gelu_python(lowerCamelCase ) , torch_builtin(lowerCamelCase ) ) )
self.assertFalse(torch.allclose(gelu_python(lowerCamelCase ) , gelu_new(lowerCamelCase ) ) )
def __lowercase ( self : Optional[int] ) -> Dict:
lowerCAmelCase_ : Tuple = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
lowerCAmelCase_ : List[Any] = get_activation("""gelu""" )
lowerCAmelCase_ : Optional[Any] = get_activation("""gelu_10""" )
lowerCAmelCase_ : Tuple = torch_builtin(lowerCamelCase )
lowerCAmelCase_ : Tuple = geluaa(lowerCamelCase )
lowerCAmelCase_ : List[str] = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(lowerCamelCase ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def __lowercase ( self : Any ) -> Union[str, Any]:
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
with self.assertRaises(lowerCamelCase ):
get_activation("""bogus""" )
with self.assertRaises(lowerCamelCase ):
get_activation(lowerCamelCase )
def __lowercase ( self : Optional[int] ) -> str:
lowerCAmelCase_ : Optional[int] = get_activation("""gelu""" )
lowerCAmelCase_ : Optional[Any] = 1
lowerCAmelCase_ : Union[str, Any] = get_activation("""gelu""" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(lowerCamelCase ):
lowerCAmelCase_ : Tuple = acta.a
| 120
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
__A : Optional[int] = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
__A : int = "UperNetConfig"
class __snake_case ( nn.Module):
"""simple docstring"""
def __init__( self : Tuple , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : Union[int, Tuple[int, int]] , lowerCamelCase : Union[int, Tuple[int, int], str] = 0 , lowerCamelCase : bool = False , lowerCamelCase : Union[int, Tuple[int, int]] = 1 , ) -> None:
super().__init__()
lowerCAmelCase_ : int = nn.Convad(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , kernel_size=lowerCamelCase , padding=lowerCamelCase , bias=lowerCamelCase , dilation=lowerCamelCase , )
lowerCAmelCase_ : Dict = nn.BatchNormad(lowerCamelCase )
lowerCAmelCase_ : Dict = nn.ReLU()
def __lowercase ( self : Tuple , lowerCamelCase : torch.Tensor ) -> torch.Tensor:
lowerCAmelCase_ : Optional[Any] = self.conv(lowerCamelCase )
lowerCAmelCase_ : Tuple = self.batch_norm(lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = self.activation(lowerCamelCase )
return output
class __snake_case ( nn.Module):
"""simple docstring"""
def __init__( self : List[str] , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int ) -> None:
super().__init__()
lowerCAmelCase_ : str = [
nn.AdaptiveAvgPoolad(lowerCamelCase ),
UperNetConvModule(lowerCamelCase , lowerCamelCase , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(lowerCamelCase ) , lowerCamelCase )
def __lowercase ( self : List[str] , lowerCamelCase : torch.Tensor ) -> torch.Tensor:
lowerCAmelCase_ : List[Any] = input
for layer in self.layers:
lowerCAmelCase_ : Tuple = layer(lowerCamelCase )
return hidden_state
class __snake_case ( nn.Module):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase : Tuple[int, ...] , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : bool ) -> None:
super().__init__()
lowerCAmelCase_ : List[str] = pool_scales
lowerCAmelCase_ : Union[str, Any] = align_corners
lowerCAmelCase_ : Tuple = in_channels
lowerCAmelCase_ : List[str] = channels
lowerCAmelCase_ : Tuple = []
for i, pool_scale in enumerate(lowerCamelCase ):
lowerCAmelCase_ : Optional[int] = UperNetPyramidPoolingBlock(pool_scale=lowerCamelCase , in_channels=lowerCamelCase , channels=lowerCamelCase )
self.blocks.append(lowerCamelCase )
self.add_module(str(lowerCamelCase ) , lowerCamelCase )
def __lowercase ( self : Optional[Any] , lowerCamelCase : torch.Tensor ) -> List[torch.Tensor]:
lowerCAmelCase_ : Any = []
for ppm in self.blocks:
lowerCAmelCase_ : Any = ppm(lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = nn.functional.interpolate(
lowerCamelCase , size=x.size()[2:] , mode="""bilinear""" , align_corners=self.align_corners )
ppm_outs.append(lowerCamelCase )
return ppm_outs
class __snake_case ( nn.Module):
"""simple docstring"""
def __init__( self : int , lowerCamelCase : List[str] , lowerCamelCase : List[Any] ) -> Dict:
super().__init__()
lowerCAmelCase_ : List[Any] = config
lowerCAmelCase_ : Any = config.pool_scales # e.g. (1, 2, 3, 6)
lowerCAmelCase_ : Dict = in_channels
lowerCAmelCase_ : Any = config.hidden_size
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : Optional[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
lowerCAmelCase_ : Dict = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
lowerCAmelCase_ : Union[str, Any] = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
lowerCAmelCase_ : Dict = nn.ModuleList()
lowerCAmelCase_ : Union[str, Any] = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
lowerCAmelCase_ : List[str] = UperNetConvModule(lowerCamelCase , self.channels , kernel_size=1 )
lowerCAmelCase_ : Optional[int] = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(lowerCamelCase )
self.fpn_convs.append(lowerCamelCase )
lowerCAmelCase_ : List[Any] = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def __lowercase ( self : List[Any] ) -> Any:
self.apply(self._init_weights )
def __lowercase ( self : Optional[int] , lowerCamelCase : str ) -> List[Any]:
if isinstance(lowerCamelCase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def __lowercase ( self : List[str] , lowerCamelCase : Optional[Any] ) -> Any:
lowerCAmelCase_ : Union[str, Any] = inputs[-1]
lowerCAmelCase_ : List[str] = [x]
psp_outs.extend(self.psp_modules(lowerCamelCase ) )
lowerCAmelCase_ : str = torch.cat(lowerCamelCase , dim=1 )
lowerCAmelCase_ : str = self.bottleneck(lowerCamelCase )
return output
def __lowercase ( self : Dict , lowerCamelCase : torch.Tensor ) -> torch.Tensor:
# build laterals
lowerCAmelCase_ : Optional[int] = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(lowerCamelCase ) )
# build top-down path
lowerCAmelCase_ : Tuple = len(lowerCamelCase )
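# FPN top-down pathway: upsample each coarser level and add it to the lateral feature map directly below it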
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowerCAmelCase_ : Union[str, Any] = laterals[i - 1].shape[2:]
lowerCAmelCase_ : Optional[Any] = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=lowerCamelCase , mode="""bilinear""" , align_corners=self.align_corners )
# build outputs
lowerCAmelCase_ : Optional[Any] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowerCAmelCase_ : Union[str, Any] = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="""bilinear""" , align_corners=self.align_corners )
lowerCAmelCase_ : Dict = torch.cat(lowerCamelCase , dim=1 )
lowerCAmelCase_ : Any = self.fpn_bottleneck(lowerCamelCase )
lowerCAmelCase_ : str = self.classifier(lowerCamelCase )
return output
class __snake_case ( nn.Module):
"""simple docstring"""
def __init__( self : List[str] , lowerCamelCase : Dict , lowerCamelCase : int = 2 , lowerCamelCase : int = 3 , lowerCamelCase : Union[int, Tuple[int, int]] = 1 ) -> None:
super().__init__()
lowerCAmelCase_ : List[Any] = config
lowerCAmelCase_ : Dict = config.auxiliary_in_channels
lowerCAmelCase_ : Optional[Any] = config.auxiliary_channels
lowerCAmelCase_ : Dict = config.auxiliary_num_convs
lowerCAmelCase_ : int = config.auxiliary_concat_input
lowerCAmelCase_ : List[Any] = in_index
lowerCAmelCase_ : List[Any] = (kernel_size // 2) * dilation
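# "same" padding for a dilated conv: (kernel_size // 2) * dilation preserves the spatial resolution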
lowerCAmelCase_ : Tuple = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=lowerCamelCase , padding=lowerCamelCase , dilation=lowerCamelCase ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=lowerCamelCase , padding=lowerCamelCase , dilation=lowerCamelCase ) )
if self.num_convs == 0:
lowerCAmelCase_ : Optional[Any] = nn.Identity()
else:
lowerCAmelCase_ : List[str] = nn.Sequential(*lowerCamelCase )
if self.concat_input:
lowerCAmelCase_ : Union[str, Any] = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=lowerCamelCase , padding=kernel_size // 2 )
lowerCAmelCase_ : Any = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def __lowercase ( self : int ) -> List[Any]:
self.apply(self._init_weights )
def __lowercase ( self : Union[str, Any] , lowerCamelCase : Optional[Any] ) -> Optional[Any]:
if isinstance(lowerCamelCase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def __lowercase ( self : Any , lowerCamelCase : torch.Tensor ) -> torch.Tensor:
# just take the relevant feature maps
lowerCAmelCase_ : Dict = encoder_hidden_states[self.in_index]
lowerCAmelCase_ : List[str] = self.convs(lowerCamelCase )
if self.concat_input:
lowerCAmelCase_ : int = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
lowerCAmelCase_ : Union[str, Any] = self.classifier(lowerCamelCase )
return output
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = UperNetConfig
lowercase = 'pixel_values'
lowercase = True
def __lowercase ( self : List[str] , lowerCamelCase : Dict ) -> Optional[Any]:
if isinstance(lowerCamelCase , lowerCamelCase ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def __lowercase ( self : Optional[int] ) -> int:
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def __lowercase ( self : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : Any=False ) -> Optional[int]:
if isinstance(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase_ : str = value
__A : Union[str, Any] = R"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__A : str = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.' ,_SCREAMING_SNAKE_CASE ,)
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
def __init__( self : Any , lowerCamelCase : List[Any] ) -> Union[str, Any]:
super().__init__(lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
lowerCAmelCase_ : Optional[Any] = UperNetHead(lowerCamelCase , in_channels=self.backbone.channels )
lowerCAmelCase_ : List[Any] = UperNetFCNHead(lowerCamelCase ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("""batch_size, sequence_length""" ) )
@replace_return_docstrings(output_type=lowerCamelCase , config_class=_CONFIG_FOR_DOC )
def __lowercase ( self : Any , lowerCamelCase : Optional[torch.Tensor] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[torch.Tensor] = None , lowerCamelCase : Optional[bool] = None , ) -> Union[tuple, SemanticSegmenterOutput]:
lowerCAmelCase_ : int = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ : Tuple = output_attentions if output_attentions is not None else self.config.output_attentions
lowerCAmelCase_ : Dict = self.backbone.forward_with_filtered_kwargs(
lowerCamelCase , output_hidden_states=lowerCamelCase , output_attentions=lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = outputs.feature_maps
lowerCAmelCase_ : Dict = self.decode_head(lowerCamelCase )
lowerCAmelCase_ : Optional[Any] = nn.functional.interpolate(lowerCamelCase , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=lowerCamelCase )
lowerCAmelCase_ : Tuple = None
if self.auxiliary_head is not None:
lowerCAmelCase_ : Optional[int] = self.auxiliary_head(lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = nn.functional.interpolate(
lowerCamelCase , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=lowerCamelCase )
lowerCAmelCase_ : str = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError("""The number of labels should be greater than one""" )
else:
# compute weighted loss
lowerCAmelCase_ : str = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
lowerCAmelCase_ : int = loss_fct(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : List[str] = loss_fct(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : str = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
lowerCAmelCase_ : int = (logits,) + outputs[1:]
else:
lowerCAmelCase_ : List[str] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=lowerCamelCase , logits=lowerCamelCase , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 120
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=3_0, lowerCamelCase_=2, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=1_0, lowerCamelCase_=0.02, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Optional[Any] = batch_size
lowerCamelCase__ : Union[str, Any] = image_size
lowerCamelCase__ : List[str] = patch_size
lowerCamelCase__ : Tuple = num_channels
lowerCamelCase__ : Union[str, Any] = is_training
lowerCamelCase__ : Optional[int] = use_labels
lowerCamelCase__ : Optional[int] = hidden_size
lowerCamelCase__ : Union[str, Any] = num_hidden_layers
lowerCamelCase__ : List[str] = num_attention_heads
lowerCamelCase__ : str = intermediate_size
lowerCamelCase__ : List[Any] = hidden_act
lowerCamelCase__ : Optional[Any] = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Optional[Any] = type_sequence_label_size
lowerCamelCase__ : int = initializer_range
lowerCamelCase__ : Dict = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase__ : Any = (image_size // patch_size) ** 2
lowerCamelCase__ : int = num_patches + 1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[int] = None
if self.use_labels:
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : Dict = self.get_config()
return config, pixel_values, labels
def a__ (self ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ViTMSNModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.type_sequence_label_size
lowerCamelCase__ : Dict = ViTMSNForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_, labels=lowerCamelCase_ )
print(f'Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
print(f'Labels: {labels}' )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase__ : str = 1
lowerCamelCase__ : Tuple = ViTMSNForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = config_and_inputs
lowerCamelCase__ : Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
lowerCamelCase__ : Optional[int] = (
{'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : List[str] = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ViTMSNModelTester(self )
lowerCamelCase__ : Any = ConfigTester(self, config_class=lowerCamelCase_, has_text_modality=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
lowerCamelCase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_, nn.Linear ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Any = model_class(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : List[Any] = [*signature.parameters.keys()]
lowerCamelCase__ : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def a__ (self ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Any = ViTMSNModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowerCamelCase_ ( ):
lowerCamelCase__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def a__ (self ):
'''simple docstring'''
torch.manual_seed(2 )
lowerCamelCase__ : Any = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(lowerCamelCase_ )
lowerCamelCase__ : str = self.default_image_processor
lowerCamelCase__ : int = prepare_img()
lowerCamelCase__ : Dict = image_processor(images=lowerCamelCase_, return_tensors='pt' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(**lowerCamelCase_ )
# verify the logits
lowerCamelCase__ : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, lowerCamelCase_ )
lowerCamelCase__ : List[Any] = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase_, atol=1e-4 ) )
| 316
|
"""simple docstring"""
import cva
import numpy as np
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
# k is an empirically determined constant in the range [0.04, 0.06]
if 0.04 <= k <= 0.06:
lowerCamelCase__ : Tuple = k
lowerCamelCase__ : Optional[Any] = window_size
else:
raise ValueError('invalid k value' )
def __str__(self ):
'''simple docstring'''
return str(self.k )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = cva.imread(lowerCamelCase_, 0 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = img.shape
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : Optional[Any] = img.copy()
lowerCamelCase__ : Optional[Any] = cva.cvtColor(lowerCamelCase_, cva.COLOR_GRAY2RGB )
lowerCamelCase__ , lowerCamelCase__ : Any = np.gradient(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = dx**2
lowerCamelCase__ : List[Any] = dy**2
lowerCamelCase__ : List[str] = dx * dy
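# per-pixel gradient products; summed over a window below, they form the 2x2 structure tensor M of the Harris detector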
lowerCamelCase__ : Tuple = self.k  # use the configured Harris constant instead of a hard-coded 0.04
lowerCamelCase__ : List[Any] = self.window_size // 2
for y in range(lowerCamelCase_, h - offset ):
for x in range(lowerCamelCase_, w - offset ):
lowerCamelCase__ : Union[str, Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : Optional[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : List[Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
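# Harris corner response: r = det(M) - k * trace(M)**2; a large positive r marks a corner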
lowerCamelCase__ : str = (wxx * wyy) - (wxy**2)
lowerCamelCase__ : Dict = wxx + wyy
lowerCamelCase__ : Union[str, Any] = det - k * (trace**2)
# corner-response threshold; raise or lower it to tune corner sensitivity
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0), 0 )
color_img.itemset((y, x, 1), 0 )
color_img.itemset((y, x, 2), 2_5_5 )
return color_img, corner_list
if __name__ == "__main__":
A_ : Optional[Any] = HarrisCorner(0.04, 3)
A_, A_ : List[Any] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 316
| 1
|
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_A : str =logging.get_logger(__name__)
_A : str ={
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
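# maps fairseq parameter name fragments to the matching HF Wav2Vec2 module paths; "*" is filled in with the layer index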
_A : Dict =[
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
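# parameters that sit on the top-level model rather than inside the wav2vec2 encoder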
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
for attribute in key.split(""".""" ):
lowerCamelCase__ : Union[str, Any] = getattr(UpperCamelCase , UpperCamelCase )
if weight_type is not None:
lowerCamelCase__ : Optional[int] = getattr(UpperCamelCase , UpperCamelCase ).shape
else:
lowerCamelCase__ : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowerCamelCase__ : Optional[Any] = value
elif weight_type == "weight_g":
lowerCamelCase__ : str = value
elif weight_type == "weight_v":
lowerCamelCase__ : Any = value
elif weight_type == "bias":
lowerCamelCase__ : str = value
else:
lowerCamelCase__ : Any = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> int:
lowerCamelCase__ : int = []
lowerCamelCase__ : Optional[Any] = fairseq_model.state_dict()
lowerCamelCase__ : Dict = hf_model.feature_extractor
lowerCamelCase__ : List[str] = hf_model.adapter
for name, value in fairseq_dict.items():
lowerCamelCase__ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == """group""" , )
lowerCamelCase__ : Tuple = True
elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ):
load_adapter(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
lowerCamelCase__ : Dict = True
if "*" in mapped_key:
lowerCamelCase__ : Dict = name.split(UpperCamelCase )[0].split(""".""" )[-2]
lowerCamelCase__ : Any = mapped_key.replace("""*""" , UpperCamelCase )
if "weight_g" in name:
lowerCamelCase__ : Optional[int] = """weight_g"""
elif "weight_v" in name:
lowerCamelCase__ : Any = """weight_v"""
elif "bias" in name:
lowerCamelCase__ : List[str] = """bias"""
elif "weight" in name:
lowerCamelCase__ : int = """weight"""
else:
lowerCamelCase__ : Optional[Any] = None
set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
continue
if not is_used:
unused_weights.append(UpperCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]:
lowerCamelCase__ : Any = full_name.split("""conv_layers.""" )[-1]
lowerCamelCase__ : int = name.split(""".""" )
lowerCamelCase__ : List[Any] = int(items[0] )
lowerCamelCase__ : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowerCamelCase__ : Union[str, Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowerCamelCase__ : List[Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowerCamelCase__ : List[Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowerCamelCase__ : Dict = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]:
lowerCamelCase__ : Union[str, Any] = full_name.split("""adaptor.""" )[-1]
lowerCamelCase__ : Optional[int] = name.split(""".""" )
if items[1].isdigit():
lowerCamelCase__ : Dict = int(items[1] )
else:
lowerCamelCase__ : str = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
lowerCamelCase__ : List[Any] = value
logger.info(f'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
lowerCamelCase__ : Dict = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
lowerCamelCase__ : Tuple = value
logger.info(f'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
lowerCamelCase__ : Union[str, Any] = value
logger.info(f'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(UpperCamelCase , UpperCamelCase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
lowerCamelCase__ : Any = value
logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
lowerCamelCase__ : str = value
logger.info(f'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> int:
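# build a linear LM head from an embedding matrix by copying its weights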
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = emb.weight.shape
lowerCamelCase__ : int = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase )
lowerCamelCase__ : str = emb.weight.data
return lin_layer
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> Union[str, Any]:
lowerCamelCase__ : Optional[Any] = WavaVecaConfig.from_pretrained(
UpperCamelCase , add_adapter=UpperCamelCase , adapter_stride=UpperCamelCase , adapter_kernel_size=UpperCamelCase , use_auth_token=UpperCamelCase , output_hidden_size=UpperCamelCase , )
lowerCamelCase__ : Dict = MBartConfig.from_pretrained(UpperCamelCase )
# load model
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"""config_yaml""": config_yaml_path,
"""data""": """/""".join(dict_path.split("""/""" )[:-1] ),
"""w2v_path""": checkpoint_path,
"""load_pretrained_decoder_from""": None,
} , )
lowerCamelCase__ : Union[str, Any] = model[0].eval()
# load feature extractor
lowerCamelCase__ : Any = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase , use_auth_token=UpperCamelCase )
# set weights for wav2vec2 encoder
lowerCamelCase__ : int = WavaVecaModel(UpperCamelCase )
recursively_load_weights_wavaveca(model.encoder , UpperCamelCase )
# load decoder weights
lowerCamelCase__ : int = MBartForCausalLM(UpperCamelCase )
lowerCamelCase__ , lowerCamelCase__ : Dict = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCamelCase )
logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
lowerCamelCase__ : List[str] = SpeechEncoderDecoderModel(encoder=UpperCamelCase , decoder=UpperCamelCase )
lowerCamelCase__ : Any = False
lowerCamelCase__ : Any = MBartaaTokenizer(UpperCamelCase )
tokenizer.save_pretrained(UpperCamelCase )
lowerCamelCase__ : List[str] = hf_wavavec.config.to_dict()
lowerCamelCase__ : Union[str, Any] = tokenizer.pad_token_id
lowerCamelCase__ : Dict = tokenizer.bos_token_id
lowerCamelCase__ : List[str] = tokenizer.eos_token_id
lowerCamelCase__ : Tuple = """mbart50"""
lowerCamelCase__ : int = """wav2vec2"""
lowerCamelCase__ : Any = tokenizer.eos_token_id
lowerCamelCase__ : List[Any] = 250004
lowerCamelCase__ : Dict = tokenizer.eos_token_id
lowerCamelCase__ : Tuple = SpeechEncoderDecoderConfig.from_dict(UpperCamelCase )
hf_wavavec.save_pretrained(UpperCamelCase )
feature_extractor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_A : Optional[Any] =argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1_024, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=250_004, type=int, help='''`decoder_start_token_id` of model config''')
_A : Optional[Any] =parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 41
|
'''simple docstring'''
def __magic_name__( lowerCamelCase, lowerCamelCase):
assert x is not None
assert y is not None
__lowerCAmelCase = len(lowerCamelCase)
__lowerCAmelCase = len(lowerCamelCase)
# declaring the array for storing the dp values
__lowerCAmelCase = [[0] * (n + 1) for _ in range(m + 1)] # noqa: E741
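# l[i][j] holds the LCS length of x[:i] and y[:j]; recurrence: l[i][j] = max(l[i-1][j], l[i][j-1], l[i-1][j-1] + match)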
for i in range(1, m + 1):
for j in range(1, n + 1):
__lowerCAmelCase = 1 if x[i - 1] == y[j - 1] else 0
__lowerCAmelCase = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)
__lowerCAmelCase = ''''''
__lowerCAmelCase , __lowerCAmelCase = m, n
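# backtrack through the dp table from (m, n) to reconstruct one longest common subsequence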
while i > 0 and j > 0:
__lowerCAmelCase = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
__lowerCAmelCase = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
_UpperCAmelCase : List[Any] = """AGGTAB"""
_UpperCAmelCase : int = """GXTXAYB"""
_UpperCAmelCase : Any = 4
_UpperCAmelCase : List[Any] = """GTAB"""
_UpperCAmelCase ,_UpperCAmelCase : Optional[int] = longest_common_subsequence(a, b)
print("""len =""", ln, """, sub-sequence =""", subseq)
import doctest
doctest.testmod()
| 174
| 0
|
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr, size, stride):
    '''apply 2D max pooling to a square input matrix'''
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix" )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape) )
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling window
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size] )
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling(arr, size, stride):
    '''apply 2D average pooling (truncated to int) to a square input matrix'''
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix" )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape) )
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling window
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size] ) )
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
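# --- illustration (not part of the original file) ---
# A quick sanity check on a 4x4 matrix with size=2 and stride=2, so the
# pooling windows tile the input exactly:
assert maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2).tolist() == [[6, 8], [14, 16]]
assert avgpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2).tolist() == [[3, 5], [11, 13]]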
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
image = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 195
|
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = R"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split("." ) ) )
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    '''Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary'''
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42 ):
    # Step 1: Convert the PyTorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key) )
    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split("." ) )
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
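# --- illustration (not part of the original file) ---
# rename_key collapses "<name>.<digits>" segments into "<name>_<digits>",
# matching how Flax addresses entries of module lists:
assert rename_key("down_blocks.0.resnets.1.conv1.weight") == "down_blocks_0.resnets_1.conv1.weight"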
| 195
| 1
|
'''simple docstring'''
import math
def sieve(n: int) -> list[int]:
    '''segmented sieve of Eratosthenes: return all primes up to n'''
    in_prime = []
    start = 2
    end = int(math.sqrt(n) )  # Size of every segment
    temp = [True] * (end + 1)
    prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start )
            for i in range(start * start, end + 1, start ):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n )
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each ) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each ):
                temp[j - low] = False
        for j in range(len(temp ) ):
            if temp[j] is True:
                prime.append(j + low )
        low = high + 1
        high = min(high + end, n )
    return prime
print(sieve(10**6))
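# --- illustration (not part of the original file) ---
# sanity check on a small bound:
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]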
| 56
|
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = """sshleifer/student_marian_en_ro_6_1"""
MBART_TINY = """sshleifer/tiny-mbart"""
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seqaseq_quick(self, distributed=False, extra_args_str=None, predict_with_generate=True, do_train=True, do_eval=True, do_predict=True, ):
        output_dir = self.run_trainer(
            eval_steps=1, max_len=12, model_name=MBART_TINY, num_train_epochs=1, distributed=distributed, extra_args_str=extra_args_str, predict_with_generate=predict_with_generate, do_train=do_train, do_eval=do_eval, do_predict=do_predict, )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json" ) ).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float )
            assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seqaseq_no_dist(self):
        self.run_seqaseq_quick()
    @require_torch_multi_gpu
    def test_run_seqaseq_dp(self):
        self.run_seqaseq_quick(distributed=False )
    @require_torch_multi_gpu
    def test_run_seqaseq_ddp(self):
        self.run_seqaseq_quick(distributed=True )
    @unittest.skip("Requires an update of the env running those tests" )
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_sharded_ddp(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple" )
    @unittest.skip("Requires an update of the env running those tests" )
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16" )
    @unittest.skip("Requires an update of the env running those tests" )
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_fully_sharded_ddp(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False )
    @unittest.skip("Requires an update of the env running those tests" )
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_fully_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False )
    @require_apex
    @require_torch_gpu
    def test_run_seqaseq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex" )
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex" )
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }
        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs, extra_args_str=data["extra_args_str"] )
        n_matches = len(re.findall(log_info_string, cl.err ) )
        self.assertEqual(n_matches, data["n_matches"] )
@slow
    def test_run_seqaseq_slow(self):
        output_dir = self.run_trainer(
            eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=10, distributed=False, )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json" ) ).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float )
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir )
        contents = {os.path.basename(p ) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
@slow
@require_bitsandbytes
    def test_run_seqaseq_bnb(self):
        from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"
            output_dir = self.run_trainer(
                max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=1, optim=optim, distributed=True, extra_args_str=extra_args, do_eval=False, do_predict=False, n_gpus_to_use=1, )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json" ) ).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 )
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 )
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
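        # a quick back-of-the-envelope check of the claim above (illustration,
        # not part of the original test): Adam keeps two fp32 moments per
        # parameter (8 bytes), bnb's 8-bit Adam keeps two int8 moments (2
        # bytes), so for the ~25M quantizable parameters:
        #   25e6 * (8 - 2) bytes ~= 150MB of optimizer-state savings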
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff, expected_savings, "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
            f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''', )
        self.assertGreater(
            gpu_total_mem_diff, expected_savings, "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
            f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''', )
        self.assertEqual(
            loss_orig, loss_bnb, f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        predict_with_generate: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f'''
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs )}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps )}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        '''.split()
        args_eval = f'''
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps )}
        '''.split()
        args_predict = "\n            --do_predict\n        ".split()
        args = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f'''
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            '''.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd, env=self.get_env() )
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs ):
                main()
return output_dir
| 55
| 0
|
INSTALL_CONTENT = '\n# How to install Transformers\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 207
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: int, parent: Node | None, ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost
    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx ) + abs(dy )
        else:
            return sqrt(dy**2 + dx**2 )
    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None )
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None )
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False
    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        return [self.start.pos]
    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent, ) )
        return successors
    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal )
        self.bwd_astar = AStar(goal, start )
        self.reached = False
    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0 )
            current_bwd_node = self.bwd_astar.open_nodes.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node )
            self.fwd_astar.closed_nodes.append(current_fwd_node )
            self.bwd_astar.closed_nodes.append(current_bwd_node )
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node ),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node ),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node )
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node ) )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node )
                        else:
                            astar.open_nodes.append(better_node )
        return [self.fwd_astar.start.pos]
    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node )
        bwd_path = self.bwd_astar.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(F"""AStar execution time = {end_time:f} seconds""")
    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
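# --- illustration (not part of the original file) ---
# The two distance measures Node.calculate_heuristic switches between,
# written out as standalone functions:
def manhattan(dx: int, dy: int) -> float:
    return abs(dx) + abs(dy)
def euclidean(dx: int, dy: int) -> float:
    return sqrt(dx**2 + dy**2)
# Manhattan distance always dominates Euclidean on a 4-connected grid, e.g.:
assert manhattan(3, 4) == 7
assert euclidean(3, 4) == 5.0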
| 207
| 1
|
'''simple docstring'''
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h )
    y = 0.0
    y += (h / 2.0) * f(a )
    for i in x_i:
        # print(i)
        y += h * f(i )
    y += (h / 2.0) * f(b )
    return y
def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps )
    print(f'''y = {y}''' )
if __name__ == "__main__":
main()
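# --- illustration (not part of the original file) ---
# A minimal closed-form sketch of the same rule that avoids the floating-point
# drift of repeatedly adding h (note that whether make_points above yields the
# last interior point depends on that drift, since it compares x < b - h):
def trapezoidal_rule(func, a, b, steps):
    h = (b - a) / steps
    total = (func(a) + func(b)) / 2.0
    for k in range(1, int(steps)):
        total += func(a + k * h)
    return h * total
# the integral of x**2 over [0, 1] is 1/3; with 10 steps the rule gives 0.335
assert abs(trapezoidal_rule(lambda x: x * x, 0.0, 1.0, 10) - 0.335) < 1e-6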
| 41
|
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname: str) -> str:
    stem = fname.split(os.path.sep )[-1]
    return re.search(R'^(.*)_\d+\.jpg$' , stem ).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id
    def __len__(self):
        return len(self.file_names )
    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname )
        image = raw_image.convert('RGB' )
        if self.image_transform is not None:
            image = self.image_transform(image )
        label = extract_label(fname )
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
    else:
        accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    image_size = config['image_size']
    if not isinstance(image_size , (list, tuple) ):
        image_size = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps , 'isdigit' ):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps )
        else:
            raise ValueError(
                f"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
    else:
        checkpointing_steps = None
# We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__ )[-1].split('.' )[0]
        accelerator.init_trackers(run , config )
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir , fname ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
    # Build the label correspondences
    all_labels = [extract_label(fname ) for fname in file_names]
    id_to_label = list(set(all_labels ) )
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label )}
# Set the seed before splitting the data.
    np.random.seed(seed )
    torch.manual_seed(seed )
    torch.cuda.manual_seed_all(seed )
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names ) )
    cut = int(0.8 * len(file_names ) )
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size , scale=(0.5, 1.0) ), ToTensor()] )
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split] , image_transform=train_tfm , label_to_id=label_to_id )
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size ), ToTensor()] )
    eval_dataset = PetsDataset([file_names[i] for i in eval_split] , image_transform=eval_tfm , label_to_id=label_to_id )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    eval_dataloader = DataLoader(eval_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model('resnet50d' , pretrained=True , num_classes=len(label_to_id ) )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
    std = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer , max_lr=lr , epochs=num_epochs , steps_per_epoch=len(train_dataloader ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
            accelerator.load_state(args.resume_from_checkpoint )
            path = os.path.basename(args.resume_from_checkpoint )
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
            dirs.sort(key=os.path.getctime )
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path )[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace('epoch_' , '' ) ) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace('step_' , '' ) )
            starting_epoch = resume_step // len(train_dataloader )
            resume_step -= starting_epoch * len(train_dataloader )
# Now we train the model
    for epoch in range(starting_epoch , num_epochs ):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader , resume_step )
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device ) for k, v in batch.items()}
            inputs = (batch['image'] - mean) / std
            outputs = model(inputs )
            loss = torch.nn.functional.cross_entropy(outputs , batch['label'] )
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss )
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps , int ):
                output_dir = f"""step_{overall_step}"""
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir , output_dir )
                        accelerator.save_state(output_dir )
model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device ) for k, v in batch.items()}
            inputs = (batch['image'] - mean) / std
            with torch.no_grad():
                outputs = model(inputs )
            predictions = outputs.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch['label']) )
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}: {100 * eval_metric:.2f}""" )
        if args.with_tracking:
            accelerator.log(
                {
                    'accuracy': 100 * eval_metric,
                    'train_loss': total_loss.item() / len(train_dataloader ),
                    'epoch': epoch,
                } , step=overall_step , )
        if checkpointing_steps == "epoch":
            output_dir = f"""epoch_{epoch}"""
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir , output_dir )
                accelerator.save_state(output_dir )
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument('--data_dir' , required=True , help='The data folder on disk.' )
    parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
    parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
    parser.add_argument(
        '--checkpointing_steps' , type=str , default=None , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
    parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--resume_from_checkpoint' , type=str , default=None , help='If the training should continue from a checkpoint folder.' , )
    parser.add_argument(
        '--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
    parser.add_argument(
        '--project_dir' , type=str , default='logs' , help='Location on where to store experiment tracking logs` and relevant project information' , )
    args = parser.parse_args()
    config = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
    training_function(config , args )
if __name__ == "__main__":
main()
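# --- illustration (not part of the original file) ---
# Typical invocation (assuming this file is saved as cv_example.py and the
# data folder contains Oxford-IIIT-Pets-style files like "Abyssinian_1.jpg"):
#
#   accelerate launch cv_example.py --data_dir /path/to/images --with_tracking
#
# extract_label relies on that "<label>_<index>.jpg" naming, e.g.:
#   extract_label("/path/to/images/Abyssinian_1.jpg") == "Abyssinian"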
| 126
| 0
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10
    def test_truncate_or_pad_too_small(self):
        '''Pad the sequence with 0 if the sequence is smaller than the block size.'''
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def test_truncate_or_pad_fit_exactly(self):
        '''Do nothing if the sequence is exactly the block size.'''
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def test_truncate_or_pad_too_big(self):
        '''Truncate the sequence if it is longer than the block size.'''
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def test_process_story_no_highlights(self):
        '''Processing a story with no highlights returns an empty list for the summary.'''
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _, summary_lines = process_story(raw_story )
        self.assertEqual(summary_lines , [] )
    def test_process_empty_story(self):
        '''An empty story returns empty collections of lines.'''
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story )
        self.assertEqual(story_lines , [] )
        self.assertEqual(summary_lines , [] )
    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story )
        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines , story_lines )
        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines , summary_lines )
    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4] )
        expected = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(sequence , 0 ).numpy() , expected.numpy() )
    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence , 23 ).numpy() , expected.numpy() )
    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence , 1 ).numpy() , expected.numpy() )
    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        result = compute_token_type_ids(batch , separator )
        np.testing.assert_array_equal(result , expected )
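# --- illustration (not part of the original file) ---
# compute_token_type_ids, as exercised above, flips the segment id at every
# separator token (101), BERT-style: [1, 101, 3, 4, 101, 6] segments into
#   1 | 101 3 4 | 101 6  ->  [1, 0, 0, 0, 1, 1]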
| 71
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname , "dpr_tokenizer" )
        os.makedirs(dpr_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(dpr_tokenizer_path , DPR_VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname , "bart_tokenizer" )
        os.makedirs(bart_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
    def tearDown(self):
        shutil.rmtree(self.tmpdirname )
@require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname , "rag_tokenizer" )
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
        rag_config.save_pretrained(save_dir )
        rag_tokenizer.save_pretrained(save_dir )
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir , config=rag_config )
        self.assertIsInstance(new_rag_tokenizer.question_encoder , DPRQuestionEncoderTokenizerFast )
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
        self.assertIsInstance(new_rag_tokenizer.generator , BartTokenizerFast )
        self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq" )
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
@slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq" )
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
| 71
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False, ) -> None:
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False) -> None:
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = 'cuda'
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
    else:
        device = 'cpu'
    output_path = Path(output_path )
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '/vae' )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , 25 , 25 ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        } , opset=opset , )
del vae_decoder
if __name__ == "__main__":
UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=1_4,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
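# --- illustration (not part of the original script) ---
# Typical invocation, assuming this script is saved as convert_vae_to_onnx.py:
#
#   python convert_vae_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./sd_onnx \
#       --opset 14
#
# The exported decoder can then be loaded with onnxruntime, e.g.:
#
#   import onnxruntime as ort
#   sess = ort.InferenceSession("sd_onnx/vae_decoder/model.onnx")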
| 316
|
"""simple docstring"""
def print_max_activities(start: list[int], finish: list[int]) -> None:
    '''greedily select a maximum set of mutually compatible activities,
    assuming the finish times are sorted in increasing order'''
    n = len(finish )
    print('The following activities are selected:' )
    # The first activity is always selected
    i = 0
    print(i , end=',' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=',' )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : int = [1, 3, 0, 5, 8, 5]
UpperCamelCase : str = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
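# --- illustration (not part of the original file) ---
# For the start/finish arrays above the greedy scan selects activities
# 0, 1, 3 and 4: activity 1 starts at 3 >= finish[0]=2, activity 3 starts
# at 5 >= finish[1]=4, and activity 4 starts at 8 >= finish[3]=7.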
| 316
| 1
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: int, parent: Node | None, ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost
    def calculate_heuristic(self):
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx ) + abs(dy )
        else:
            return sqrt(dy**2 + dx**2 )
    def __lt__(self, other: Node):
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , None )
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False
    def search(self):
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        return [self.start.pos]
    def get_successors(self, parent: Node):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors
    def retrace_path(self, node: Node | None):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start , goal )
        self.bwd_astar = AStar(goal , start )
        self.reached = False
    def search(self):
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0 )
            current_bwd_node = self.bwd_astar.open_nodes.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            self.fwd_astar.closed_nodes.append(current_fwd_node )
            self.bwd_astar.closed_nodes.append(current_bwd_node )
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node ),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node ),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node )
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node ) )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node )
                        else:
                            astar.open_nodes.append(better_node )
        return [self.fwd_astar.start.pos]
    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node):
        fwd_path = self.fwd_astar.retrace_path(fwd_node )
        bwd_path = self.bwd_astar.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(F"AStar execution time = {end_time:f} seconds")
    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
print(F"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 41
|
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should be in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
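# Example invocation (the script filename and checkpoint path below are
# illustrative; any TF-Slim MobileNetV1 checkpoint whose name encodes the
# depth multiplier and input size should work):
#
#   python convert_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_pt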
| 41
| 1
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The value of math.pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(
    iterations: int, function_to_integrate: Callable[[float], float], min_value: float = 0.0, max_value: float = 1.0
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
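# Why the integral above estimates pi: y = sqrt(4 - x**2) on [0, 2] traces a
# quarter circle of radius 2, whose area is (1 / 4) * pi * 2**2 = pi. A quick
# sanity check (the sample sizes are illustrative, not tuned):
#
#   pi_estimator(100_000)
#   area_under_line_estimator_check(100_000)
#   pi_estimator_using_area_under_curve(100_000)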
if __name__ == "__main__":
import doctest
doctest.testmod()
| 195
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
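# The transposes above reflect differing weight layouts rather than any model
# change: PyTorch stores conv kernels as (out_ch, in_ch, h, w) while Flax
# expects (h, w, in_ch, out_ch), hence transpose(2, 3, 1, 0); PyTorch linear
# weights are (out, in) while Flax kernels are (in, out), hence the plain .T.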
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
| 195
| 1
|
"""simple docstring"""
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
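# Where the formula comes from: setting m = (sqrt(4n + 1) + 1) / 2 and solving
# gives n = m * (m - 1), so the check above succeeds exactly when the input is
# of the form m * (m - 1) with m a power of two, i.e. when log2(m) is integral.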
def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(F'''{solution() = }''')
| 368
|
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
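# A minimal usage sketch (the checkpoint name is illustrative; any BLIP
# checkpoint providing both an image processor and a BERT tokenizer works):
#
#   from transformers import BlipProcessor
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photography of", return_tensors="pt")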
| 54
| 0
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BigBird model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
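# Example invocation (the script filename and paths are hypothetical):
#
#   python convert_big_bird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bigbird_model.ckpt \
#       --big_bird_config_file ./big_bird_config.json \
#       --pytorch_dump_path ./big_bird_pt \
#       --is_trivia_qa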
| 207
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'distilbert-base-uncased': 512,
    'distilbert-base-uncased-distilled-squad': 512,
    'distilbert-base-cased': 512,
    'distilbert-base-cased-distilled-squad': 512,
    'distilbert-base-german-cased': 512,
    'distilbert-base-multilingual-cased': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = DistilBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
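# Layout of the token_type_ids produced above for a sequence pair (token
# values are schematic):
#
#   tokens:   [CLS] A1 A2 [SEP] B1 B2 [SEP]
#   type ids:   0   0  0    0   1  1    1
#
# Note that DistilBERT itself ignores token type ids (model_input_names above
# lists only input_ids and attention_mask); the method is kept for drop-in
# compatibility with BERT-style pipelines.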
| 207
| 1
|
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array, output_array):
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self):
        # layer_between_input_and_first_hidden_layer is the layer connecting
        # the input nodes with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self):
        # The error gradient 2 * (y - y_hat) * sigmoid'(y_hat) is propagated
        # backwards through each layer to compute the weight updates.
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output, iterations, give_loss):
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr):
        # Input values for which the predictions are to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )

        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))
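# Note that sigmoid_derivative expects the *output* of sigmoid, not the raw
# pre-activation: if s = sigmoid(x), then ds/dx = s * (1 - s), which is why
# back_propagation feeds it the already-activated layer values.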
def example() -> int:
    # Input values used to train the neural network.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
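# The training targets above are the odd parity (three-way XOR) of the input
# bits: the label is 1 exactly when an odd number of inputs are 1. Ten
# iterations is only a smoke test; expect many more for the loss to settle.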
if __name__ == "__main__":
example()
| 371
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
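# A minimal usage sketch (the checkpoint name mirrors the archive map above):
#
#   from transformers import SEWConfig, SEWModel
#   config = SEWConfig.from_pretrained("asapp/sew-tiny-100k")
#   model = SEWModel(config)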
| 118
| 0
|